diff --git a/Makefile b/Makefile index 327f502..abdae96 100644 --- a/Makefile +++ b/Makefile @@ -46,7 +46,7 @@ GO_SUBDIRS += cmd internal apis # ==================================================================================== # Setup Kubernetes tools -KIND_VERSION = v0.23.0 +KIND_VERSION = v0.24.0 UP_VERSION = v0.31.0 UP_CHANNEL = stable UPTEST_VERSION = v0.11.1 diff --git a/apis/api/v1alpha1/zz_gateway_terraformed.go b/apis/api/v1alpha1/zz_gateway_terraformed.go new file mode 100755 index 0000000..4b9c796 --- /dev/null +++ b/apis/api/v1alpha1/zz_gateway_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Gateway +func (mg *Gateway) GetTerraformResourceType() string { + return "yandex_api_gateway" +} + +// GetConnectionDetailsMapping for this Gateway +func (tr *Gateway) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Gateway +func (tr *Gateway) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Gateway +func (tr *Gateway) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Gateway +func (tr *Gateway) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Gateway +func (tr *Gateway) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err 
!= nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Gateway +func (tr *Gateway) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Gateway +func (tr *Gateway) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this Gateway +func (tr *Gateway) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Gateway using its observed tfState. +// returns true if there are any spec changes for the resource. 
+func (tr *Gateway) LateInitialize(attrs []byte) (bool, error) { + params := &GatewayParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Gateway) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/api/v1alpha1/zz_gateway_types.go b/apis/api/v1alpha1/zz_gateway_types.go new file mode 100755 index 0000000..5f4d845 --- /dev/null +++ b/apis/api/v1alpha1/zz_gateway_types.go @@ -0,0 +1,362 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CanaryInitParameters struct { + + // A set of values for variables in gateway specification. + // +mapType=granular + Variables map[string]*string `json:"variables,omitempty" tf:"variables,omitempty"` + + // Percentage of requests, which will be processed by canary release. + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type CanaryObservation struct { + + // A set of values for variables in gateway specification. + // +mapType=granular + Variables map[string]*string `json:"variables,omitempty" tf:"variables,omitempty"` + + // Percentage of requests, which will be processed by canary release. + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type CanaryParameters struct { + + // A set of values for variables in gateway specification. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Variables map[string]*string `json:"variables,omitempty" tf:"variables,omitempty"` + + // Percentage of requests, which will be processed by canary release. + // +kubebuilder:validation:Optional + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type ConnectivityInitParameters struct { + + // Network the gateway will have access to. It's essential to specify network with subnets in all availability zones. + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` +} + +type ConnectivityObservation struct { + + // Network the gateway will have access to. It's essential to specify network with subnets in all availability zones. + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` +} + +type ConnectivityParameters struct { + + // Network the gateway will have access to. It's essential to specify network with subnets in all availability zones. + // +kubebuilder:validation:Optional + NetworkID *string `json:"networkId" tf:"network_id,omitempty"` +} + +type CustomDomainsInitParameters struct { + CertificateID *string `json:"certificateId,omitempty" tf:"certificate_id,omitempty"` + + DomainID *string `json:"domainId,omitempty" tf:"domain_id,omitempty"` + + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` +} + +type CustomDomainsObservation struct { + CertificateID *string `json:"certificateId,omitempty" tf:"certificate_id,omitempty"` + + DomainID *string `json:"domainId,omitempty" tf:"domain_id,omitempty"` + + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` +} + +type CustomDomainsParameters struct { + + // +kubebuilder:validation:Optional + CertificateID *string `json:"certificateId" tf:"certificate_id,omitempty"` + + // +kubebuilder:validation:Optional + DomainID *string `json:"domainId,omitempty" tf:"domain_id,omitempty"` + + // +kubebuilder:validation:Optional + Fqdn *string `json:"fqdn" tf:"fqdn,omitempty"` +} + +type 
GatewayInitParameters struct { + + // Canary release settings of gateway. + Canary []CanaryInitParameters `json:"canary,omitempty" tf:"canary,omitempty"` + + // Gateway connectivity. If specified the gateway will be attached to specified network. + Connectivity []ConnectivityInitParameters `json:"connectivity,omitempty" tf:"connectivity,omitempty"` + + // Set of custom domains to be attached to Yandex API Gateway. + CustomDomains []CustomDomainsInitParameters `json:"customDomains,omitempty" tf:"custom_domains,omitempty"` + + // Description of the Yandex Cloud API Gateway. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Execution timeout in seconds for the Yandex Cloud API Gateway. + ExecutionTimeout *string `json:"executionTimeout,omitempty" tf:"execution_timeout,omitempty"` + + // Folder ID for the Yandex Cloud API Gateway. If it is not provided, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // A set of key/value label pairs to assign to the Yandex Cloud API Gateway. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Options for logging from Yandex Cloud API Gateway. + LogOptions []LogOptionsInitParameters `json:"logOptions,omitempty" tf:"log_options,omitempty"` + + // Yandex Cloud API Gateway name used to define API Gateway. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // OpenAPI specification for Yandex API Gateway. 
+ Spec *string `json:"spec,omitempty" tf:"spec,omitempty"` + + // A set of values for variables in gateway specification. + // +mapType=granular + Variables map[string]*string `json:"variables,omitempty" tf:"variables,omitempty"` +} + +type GatewayObservation struct { + + // Canary release settings of gateway. + Canary []CanaryObservation `json:"canary,omitempty" tf:"canary,omitempty"` + + // Gateway connectivity. If specified the gateway will be attached to specified network. + Connectivity []ConnectivityObservation `json:"connectivity,omitempty" tf:"connectivity,omitempty"` + + // Creation timestamp of the Yandex Cloud API Gateway. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // Set of custom domains to be attached to Yandex API Gateway. + CustomDomains []CustomDomainsObservation `json:"customDomains,omitempty" tf:"custom_domains,omitempty"` + + // Description of the Yandex Cloud API Gateway. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Default domain for the Yandex API Gateway. Generated at creation time. + Domain *string `json:"domain,omitempty" tf:"domain,omitempty"` + + // Execution timeout in seconds for the Yandex Cloud API Gateway. + ExecutionTimeout *string `json:"executionTimeout,omitempty" tf:"execution_timeout,omitempty"` + + // Folder ID for the Yandex Cloud API Gateway. If it is not provided, the default provider folder is used. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A set of key/value label pairs to assign to the Yandex Cloud API Gateway. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Log entries are written to specified log group + LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` + + // Options for logging from Yandex Cloud API Gateway. 
+ LogOptions []LogOptionsObservation `json:"logOptions,omitempty" tf:"log_options,omitempty"` + + // Yandex Cloud API Gateway name used to define API Gateway. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // OpenAPI specification for Yandex API Gateway. + Spec *string `json:"spec,omitempty" tf:"spec,omitempty"` + + // Status of the Yandex API Gateway. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // (DEPRECATED, use custom_domains instead) Set of user domains attached to Yandex API Gateway. + // +listType=set + UserDomains []*string `json:"userDomains,omitempty" tf:"user_domains,omitempty"` + + // A set of values for variables in gateway specification. + // +mapType=granular + Variables map[string]*string `json:"variables,omitempty" tf:"variables,omitempty"` +} + +type GatewayParameters struct { + + // Canary release settings of gateway. + // +kubebuilder:validation:Optional + Canary []CanaryParameters `json:"canary,omitempty" tf:"canary,omitempty"` + + // Gateway connectivity. If specified the gateway will be attached to specified network. + // +kubebuilder:validation:Optional + Connectivity []ConnectivityParameters `json:"connectivity,omitempty" tf:"connectivity,omitempty"` + + // Set of custom domains to be attached to Yandex API Gateway. + // +kubebuilder:validation:Optional + CustomDomains []CustomDomainsParameters `json:"customDomains,omitempty" tf:"custom_domains,omitempty"` + + // Description of the Yandex Cloud API Gateway. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Execution timeout in seconds for the Yandex Cloud API Gateway. + // +kubebuilder:validation:Optional + ExecutionTimeout *string `json:"executionTimeout,omitempty" tf:"execution_timeout,omitempty"` + + // Folder ID for the Yandex Cloud API Gateway. If it is not provided, the default provider folder is used. 
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // A set of key/value label pairs to assign to the Yandex Cloud API Gateway. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Options for logging from Yandex Cloud API Gateway. + // +kubebuilder:validation:Optional + LogOptions []LogOptionsParameters `json:"logOptions,omitempty" tf:"log_options,omitempty"` + + // Yandex Cloud API Gateway name used to define API Gateway. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // OpenAPI specification for Yandex API Gateway. + // +kubebuilder:validation:Optional + Spec *string `json:"spec,omitempty" tf:"spec,omitempty"` + + // A set of values for variables in gateway specification. + // +kubebuilder:validation:Optional + // +mapType=granular + Variables map[string]*string `json:"variables,omitempty" tf:"variables,omitempty"` +} + +type LogOptionsInitParameters struct { + + // Is logging from API Gateway disabled + Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` + + // Folder ID for the Yandex Cloud API Gateway. If it is not provided, the default provider folder is used. 
+ FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Log entries are written to specified log group + LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` + + // Minimum log entry level + MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"` +} + +type LogOptionsObservation struct { + + // Is logging from API Gateway disabled + Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` + + // Folder ID for the Yandex Cloud API Gateway. If it is not provided, the default provider folder is used. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Log entries are written to specified log group + LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` + + // Minimum log entry level + MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"` +} + +type LogOptionsParameters struct { + + // Is logging from API Gateway disabled + // +kubebuilder:validation:Optional + Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` + + // Folder ID for the Yandex Cloud API Gateway. If it is not provided, the default provider folder is used. + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Log entries are written to specified log group + // +kubebuilder:validation:Optional + LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` + + // Minimum log entry level + // +kubebuilder:validation:Optional + MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"` +} + +// GatewaySpec defines the desired state of Gateway +type GatewaySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider GatewayParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because an external controller is managing them, like an + // autoscaler. + InitProvider GatewayInitParameters `json:"initProvider,omitempty"` +} + +// GatewayStatus defines the observed state of Gateway. +type GatewayStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider GatewayObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// Gateway is the Schema for the Gateways API. Allows management of a Yandex Cloud API Gateway. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type Gateway struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.spec) || (has(self.initProvider) && has(self.initProvider.spec))",message="spec.forProvider.spec is a required parameter" + Spec GatewaySpec `json:"spec"` + Status GatewayStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// GatewayList contains a list of Gateways +type GatewayList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Gateway `json:"items"` +} + +// Repository type metadata. +var ( + Gateway_Kind = "Gateway" + Gateway_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Gateway_Kind}.String() + Gateway_KindAPIVersion = Gateway_Kind + "." 
+ CRDGroupVersion.String() + Gateway_GroupVersionKind = CRDGroupVersion.WithKind(Gateway_Kind) +) + +func init() { + SchemeBuilder.Register(&Gateway{}, &GatewayList{}) +} diff --git a/apis/api/v1alpha1/zz_generated.conversion_hubs.go b/apis/api/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..41b1249 --- /dev/null +++ b/apis/api/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,6 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +// Hub marks this type as a conversion hub. +func (tr *Gateway) Hub() {} diff --git a/apis/api/v1alpha1/zz_generated.deepcopy.go b/apis/api/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..9bc5460 --- /dev/null +++ b/apis/api/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,823 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CanaryInitParameters) DeepCopyInto(out *CanaryInitParameters) { + *out = *in + if in.Variables != nil { + in, out := &in.Variables, &out.Variables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanaryInitParameters. 
+func (in *CanaryInitParameters) DeepCopy() *CanaryInitParameters { + if in == nil { + return nil + } + out := new(CanaryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CanaryObservation) DeepCopyInto(out *CanaryObservation) { + *out = *in + if in.Variables != nil { + in, out := &in.Variables, &out.Variables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanaryObservation. +func (in *CanaryObservation) DeepCopy() *CanaryObservation { + if in == nil { + return nil + } + out := new(CanaryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CanaryParameters) DeepCopyInto(out *CanaryParameters) { + *out = *in + if in.Variables != nil { + in, out := &in.Variables, &out.Variables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanaryParameters. 
+func (in *CanaryParameters) DeepCopy() *CanaryParameters { + if in == nil { + return nil + } + out := new(CanaryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectivityInitParameters) DeepCopyInto(out *ConnectivityInitParameters) { + *out = *in + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectivityInitParameters. +func (in *ConnectivityInitParameters) DeepCopy() *ConnectivityInitParameters { + if in == nil { + return nil + } + out := new(ConnectivityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectivityObservation) DeepCopyInto(out *ConnectivityObservation) { + *out = *in + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectivityObservation. +func (in *ConnectivityObservation) DeepCopy() *ConnectivityObservation { + if in == nil { + return nil + } + out := new(ConnectivityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectivityParameters) DeepCopyInto(out *ConnectivityParameters) { + *out = *in + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectivityParameters. 
+func (in *ConnectivityParameters) DeepCopy() *ConnectivityParameters { + if in == nil { + return nil + } + out := new(ConnectivityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomDomainsInitParameters) DeepCopyInto(out *CustomDomainsInitParameters) { + *out = *in + if in.CertificateID != nil { + in, out := &in.CertificateID, &out.CertificateID + *out = new(string) + **out = **in + } + if in.DomainID != nil { + in, out := &in.DomainID, &out.DomainID + *out = new(string) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomDomainsInitParameters. +func (in *CustomDomainsInitParameters) DeepCopy() *CustomDomainsInitParameters { + if in == nil { + return nil + } + out := new(CustomDomainsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomDomainsObservation) DeepCopyInto(out *CustomDomainsObservation) { + *out = *in + if in.CertificateID != nil { + in, out := &in.CertificateID, &out.CertificateID + *out = new(string) + **out = **in + } + if in.DomainID != nil { + in, out := &in.DomainID, &out.DomainID + *out = new(string) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomDomainsObservation. +func (in *CustomDomainsObservation) DeepCopy() *CustomDomainsObservation { + if in == nil { + return nil + } + out := new(CustomDomainsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *CustomDomainsParameters) DeepCopyInto(out *CustomDomainsParameters) { + *out = *in + if in.CertificateID != nil { + in, out := &in.CertificateID, &out.CertificateID + *out = new(string) + **out = **in + } + if in.DomainID != nil { + in, out := &in.DomainID, &out.DomainID + *out = new(string) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomDomainsParameters. +func (in *CustomDomainsParameters) DeepCopy() *CustomDomainsParameters { + if in == nil { + return nil + } + out := new(CustomDomainsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Gateway) DeepCopyInto(out *Gateway) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Gateway. +func (in *Gateway) DeepCopy() *Gateway { + if in == nil { + return nil + } + out := new(Gateway) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Gateway) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GatewayInitParameters) DeepCopyInto(out *GatewayInitParameters) { + *out = *in + if in.Canary != nil { + in, out := &in.Canary, &out.Canary + *out = make([]CanaryInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Connectivity != nil { + in, out := &in.Connectivity, &out.Connectivity + *out = make([]ConnectivityInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomDomains != nil { + in, out := &in.CustomDomains, &out.CustomDomains + *out = make([]CustomDomainsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExecutionTimeout != nil { + in, out := &in.ExecutionTimeout, &out.ExecutionTimeout + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.LogOptions != nil { + in, out := &in.LogOptions, &out.LogOptions + *out = make([]LogOptionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(string) + 
**out = **in + } + if in.Variables != nil { + in, out := &in.Variables, &out.Variables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayInitParameters. +func (in *GatewayInitParameters) DeepCopy() *GatewayInitParameters { + if in == nil { + return nil + } + out := new(GatewayInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayList) DeepCopyInto(out *GatewayList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Gateway, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayList. +func (in *GatewayList) DeepCopy() *GatewayList { + if in == nil { + return nil + } + out := new(GatewayList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GatewayList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GatewayObservation) DeepCopyInto(out *GatewayObservation) { + *out = *in + if in.Canary != nil { + in, out := &in.Canary, &out.Canary + *out = make([]CanaryObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Connectivity != nil { + in, out := &in.Connectivity, &out.Connectivity + *out = make([]ConnectivityObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.CustomDomains != nil { + in, out := &in.CustomDomains, &out.CustomDomains + *out = make([]CustomDomainsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Domain != nil { + in, out := &in.Domain, &out.Domain + *out = new(string) + **out = **in + } + if in.ExecutionTimeout != nil { + in, out := &in.ExecutionTimeout, &out.ExecutionTimeout + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.LogGroupID != nil { + in, out := &in.LogGroupID, &out.LogGroupID + *out = new(string) + **out = **in + } + if in.LogOptions != nil { + in, out := &in.LogOptions, &out.LogOptions + *out = make([]LogOptionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out 
= new(string) + **out = **in + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.UserDomains != nil { + in, out := &in.UserDomains, &out.UserDomains + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Variables != nil { + in, out := &in.Variables, &out.Variables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayObservation. +func (in *GatewayObservation) DeepCopy() *GatewayObservation { + if in == nil { + return nil + } + out := new(GatewayObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GatewayParameters) DeepCopyInto(out *GatewayParameters) { + *out = *in + if in.Canary != nil { + in, out := &in.Canary, &out.Canary + *out = make([]CanaryParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Connectivity != nil { + in, out := &in.Connectivity, &out.Connectivity + *out = make([]ConnectivityParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomDomains != nil { + in, out := &in.CustomDomains, &out.CustomDomains + *out = make([]CustomDomainsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExecutionTimeout != nil { + in, out := &in.ExecutionTimeout, &out.ExecutionTimeout + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.LogOptions != nil { + in, out := &in.LogOptions, &out.LogOptions + *out = make([]LogOptionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(string) + **out = **in + } + if 
in.Variables != nil { + in, out := &in.Variables, &out.Variables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayParameters. +func (in *GatewayParameters) DeepCopy() *GatewayParameters { + if in == nil { + return nil + } + out := new(GatewayParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewaySpec) DeepCopyInto(out *GatewaySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewaySpec. +func (in *GatewaySpec) DeepCopy() *GatewaySpec { + if in == nil { + return nil + } + out := new(GatewaySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayStatus) DeepCopyInto(out *GatewayStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayStatus. +func (in *GatewayStatus) DeepCopy() *GatewayStatus { + if in == nil { + return nil + } + out := new(GatewayStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogOptionsInitParameters) DeepCopyInto(out *LogOptionsInitParameters) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.LogGroupID != nil { + in, out := &in.LogGroupID, &out.LogGroupID + *out = new(string) + **out = **in + } + if in.MinLevel != nil { + in, out := &in.MinLevel, &out.MinLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogOptionsInitParameters. +func (in *LogOptionsInitParameters) DeepCopy() *LogOptionsInitParameters { + if in == nil { + return nil + } + out := new(LogOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogOptionsObservation) DeepCopyInto(out *LogOptionsObservation) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.LogGroupID != nil { + in, out := &in.LogGroupID, &out.LogGroupID + *out = new(string) + **out = **in + } + if in.MinLevel != nil { + in, out := &in.MinLevel, &out.MinLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogOptionsObservation. +func (in *LogOptionsObservation) DeepCopy() *LogOptionsObservation { + if in == nil { + return nil + } + out := new(LogOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogOptionsParameters) DeepCopyInto(out *LogOptionsParameters) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.LogGroupID != nil { + in, out := &in.LogGroupID, &out.LogGroupID + *out = new(string) + **out = **in + } + if in.MinLevel != nil { + in, out := &in.MinLevel, &out.MinLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogOptionsParameters. +func (in *LogOptionsParameters) DeepCopy() *LogOptionsParameters { + if in == nil { + return nil + } + out := new(LogOptionsParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/api/v1alpha1/zz_generated.managed.go b/apis/api/v1alpha1/zz_generated.managed.go new file mode 100644 index 0000000..8eb7bff --- /dev/null +++ b/apis/api/v1alpha1/zz_generated.managed.go @@ -0,0 +1,65 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Gateway. +func (mg *Gateway) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Gateway. +func (mg *Gateway) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Gateway. +func (mg *Gateway) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Gateway. +func (mg *Gateway) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Gateway. 
+func (mg *Gateway) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Gateway. +func (mg *Gateway) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Gateway. +func (mg *Gateway) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Gateway. +func (mg *Gateway) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Gateway. +func (mg *Gateway) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Gateway. +func (mg *Gateway) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Gateway. +func (mg *Gateway) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Gateway. +func (mg *Gateway) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/api/v1alpha1/zz_generated.managedlist.go b/apis/api/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 0000000..d4c17d6 --- /dev/null +++ b/apis/api/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,14 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this GatewayList. 
+func (l *GatewayList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/api/v1alpha1/zz_generated.resolvers.go b/apis/api/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000..155dd00 --- /dev/null +++ b/apis/api/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,53 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this Gateway. +func (mg *Gateway) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/api/v1alpha1/zz_groupversion_info.go b/apis/api/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..a7b4506 --- /dev/null +++ b/apis/api/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,28 @@ +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=api.yandex-cloud.upjet.crossplane.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "api.yandex-cloud.upjet.crossplane.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/backup/v1alpha1/zz_generated.conversion_hubs.go b/apis/backup/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..753e5b1 --- /dev/null +++ b/apis/backup/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,6 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +// Hub marks this type as a conversion hub. +func (tr *Policy) Hub() {} diff --git a/apis/backup/v1alpha1/zz_generated.deepcopy.go b/apis/backup/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..68811ce --- /dev/null +++ b/apis/backup/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,1230 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecuteByTimeInitParameters) DeepCopyInto(out *ExecuteByTimeInitParameters) { + *out = *in + if in.IncludeLastDayOfMonth != nil { + in, out := &in.IncludeLastDayOfMonth, &out.IncludeLastDayOfMonth + *out = new(bool) + **out = **in + } + if in.Monthdays != nil { + in, out := &in.Monthdays, &out.Monthdays + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Months != nil { + in, out := &in.Months, &out.Months + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.RepeatAt != nil { + in, out := &in.RepeatAt, &out.RepeatAt + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RepeatEvery != nil { + in, out := &in.RepeatEvery, &out.RepeatEvery + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecuteByTimeInitParameters. 
+func (in *ExecuteByTimeInitParameters) DeepCopy() *ExecuteByTimeInitParameters { + if in == nil { + return nil + } + out := new(ExecuteByTimeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecuteByTimeObservation) DeepCopyInto(out *ExecuteByTimeObservation) { + *out = *in + if in.IncludeLastDayOfMonth != nil { + in, out := &in.IncludeLastDayOfMonth, &out.IncludeLastDayOfMonth + *out = new(bool) + **out = **in + } + if in.Monthdays != nil { + in, out := &in.Monthdays, &out.Monthdays + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Months != nil { + in, out := &in.Months, &out.Months + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.RepeatAt != nil { + in, out := &in.RepeatAt, &out.RepeatAt + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RepeatEvery != nil { + in, out := &in.RepeatEvery, &out.RepeatEvery + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecuteByTimeObservation. 
+func (in *ExecuteByTimeObservation) DeepCopy() *ExecuteByTimeObservation { + if in == nil { + return nil + } + out := new(ExecuteByTimeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecuteByTimeParameters) DeepCopyInto(out *ExecuteByTimeParameters) { + *out = *in + if in.IncludeLastDayOfMonth != nil { + in, out := &in.IncludeLastDayOfMonth, &out.IncludeLastDayOfMonth + *out = new(bool) + **out = **in + } + if in.Monthdays != nil { + in, out := &in.Monthdays, &out.Monthdays + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Months != nil { + in, out := &in.Months, &out.Months + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.RepeatAt != nil { + in, out := &in.RepeatAt, &out.RepeatAt + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RepeatEvery != nil { + in, out := &in.RepeatEvery, &out.RepeatEvery + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecuteByTimeParameters. 
+func (in *ExecuteByTimeParameters) DeepCopy() *ExecuteByTimeParameters { + if in == nil { + return nil + } + out := new(ExecuteByTimeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Policy) DeepCopyInto(out *Policy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy. +func (in *Policy) DeepCopy() *Policy { + if in == nil { + return nil + } + out := new(Policy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Policy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyInitParameters) DeepCopyInto(out *PolicyInitParameters) { + *out = *in + if in.ArchiveName != nil { + in, out := &in.ArchiveName, &out.ArchiveName + *out = new(string) + **out = **in + } + if in.Cbt != nil { + in, out := &in.Cbt, &out.Cbt + *out = new(string) + **out = **in + } + if in.Compression != nil { + in, out := &in.Compression, &out.Compression + *out = new(string) + **out = **in + } + if in.FastBackupEnabled != nil { + in, out := &in.FastBackupEnabled, &out.FastBackupEnabled + *out = new(bool) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.MultiVolumeSnapshottingEnabled != nil { + in, out := &in.MultiVolumeSnapshottingEnabled, &out.MultiVolumeSnapshottingEnabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PerformanceWindowEnabled != nil { + in, out := &in.PerformanceWindowEnabled, &out.PerformanceWindowEnabled + *out = new(bool) + **out = **in + } + if in.PreserveFileSecuritySettings != nil { + in, out := &in.PreserveFileSecuritySettings, &out.PreserveFileSecuritySettings + *out = new(bool) + **out = **in + } + if in.QuiesceSnapshottingEnabled != nil { + in, out := &in.QuiesceSnapshottingEnabled, &out.QuiesceSnapshottingEnabled + *out = new(bool) + **out = **in + } + if in.Reattempts != nil { + in, out := &in.Reattempts, &out.Reattempts + *out = make([]ReattemptsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Retention != nil { + 
in, out := &in.Retention, &out.Retention + *out = make([]RetentionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Scheduling != nil { + in, out := &in.Scheduling, &out.Scheduling + *out = make([]SchedulingInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SilentModeEnabled != nil { + in, out := &in.SilentModeEnabled, &out.SilentModeEnabled + *out = new(bool) + **out = **in + } + if in.SplittingBytes != nil { + in, out := &in.SplittingBytes, &out.SplittingBytes + *out = new(string) + **out = **in + } + if in.VMSnapshotReattempts != nil { + in, out := &in.VMSnapshotReattempts, &out.VMSnapshotReattempts + *out = make([]VMSnapshotReattemptsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VssProvider != nil { + in, out := &in.VssProvider, &out.VssProvider + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyInitParameters. +func (in *PolicyInitParameters) DeepCopy() *PolicyInitParameters { + if in == nil { + return nil + } + out := new(PolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyList) DeepCopyInto(out *PolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Policy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyList. 
+func (in *PolicyList) DeepCopy() *PolicyList { + if in == nil { + return nil + } + out := new(PolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyObservation) DeepCopyInto(out *PolicyObservation) { + *out = *in + if in.ArchiveName != nil { + in, out := &in.ArchiveName, &out.ArchiveName + *out = new(string) + **out = **in + } + if in.Cbt != nil { + in, out := &in.Cbt, &out.Cbt + *out = new(string) + **out = **in + } + if in.Compression != nil { + in, out := &in.Compression, &out.Compression + *out = new(string) + **out = **in + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.FastBackupEnabled != nil { + in, out := &in.FastBackupEnabled, &out.FastBackupEnabled + *out = new(bool) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MultiVolumeSnapshottingEnabled != nil { + in, out := &in.MultiVolumeSnapshottingEnabled, &out.MultiVolumeSnapshottingEnabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PerformanceWindowEnabled != nil { + in, out := &in.PerformanceWindowEnabled, &out.PerformanceWindowEnabled + *out = new(bool) + **out = **in + } + if 
in.PreserveFileSecuritySettings != nil { + in, out := &in.PreserveFileSecuritySettings, &out.PreserveFileSecuritySettings + *out = new(bool) + **out = **in + } + if in.QuiesceSnapshottingEnabled != nil { + in, out := &in.QuiesceSnapshottingEnabled, &out.QuiesceSnapshottingEnabled + *out = new(bool) + **out = **in + } + if in.Reattempts != nil { + in, out := &in.Reattempts, &out.Reattempts + *out = make([]ReattemptsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Retention != nil { + in, out := &in.Retention, &out.Retention + *out = make([]RetentionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Scheduling != nil { + in, out := &in.Scheduling, &out.Scheduling + *out = make([]SchedulingObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SilentModeEnabled != nil { + in, out := &in.SilentModeEnabled, &out.SilentModeEnabled + *out = new(bool) + **out = **in + } + if in.SplittingBytes != nil { + in, out := &in.SplittingBytes, &out.SplittingBytes + *out = new(string) + **out = **in + } + if in.UpdatedAt != nil { + in, out := &in.UpdatedAt, &out.UpdatedAt + *out = new(string) + **out = **in + } + if in.VMSnapshotReattempts != nil { + in, out := &in.VMSnapshotReattempts, &out.VMSnapshotReattempts + *out = make([]VMSnapshotReattemptsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VssProvider != nil { + in, out := &in.VssProvider, &out.VssProvider + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyObservation. +func (in *PolicyObservation) DeepCopy() *PolicyObservation { + if in == nil { + return nil + } + out := new(PolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyParameters) DeepCopyInto(out *PolicyParameters) { + *out = *in + if in.ArchiveName != nil { + in, out := &in.ArchiveName, &out.ArchiveName + *out = new(string) + **out = **in + } + if in.Cbt != nil { + in, out := &in.Cbt, &out.Cbt + *out = new(string) + **out = **in + } + if in.Compression != nil { + in, out := &in.Compression, &out.Compression + *out = new(string) + **out = **in + } + if in.FastBackupEnabled != nil { + in, out := &in.FastBackupEnabled, &out.FastBackupEnabled + *out = new(bool) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.MultiVolumeSnapshottingEnabled != nil { + in, out := &in.MultiVolumeSnapshottingEnabled, &out.MultiVolumeSnapshottingEnabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PerformanceWindowEnabled != nil { + in, out := &in.PerformanceWindowEnabled, &out.PerformanceWindowEnabled + *out = new(bool) + **out = **in + } + if in.PreserveFileSecuritySettings != nil { + in, out := &in.PreserveFileSecuritySettings, &out.PreserveFileSecuritySettings + *out = new(bool) + **out = **in + } + if in.QuiesceSnapshottingEnabled != nil { + in, out := &in.QuiesceSnapshottingEnabled, &out.QuiesceSnapshottingEnabled + *out = new(bool) + **out = **in + } + if in.Reattempts != nil { + in, out := &in.Reattempts, &out.Reattempts + *out = make([]ReattemptsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Retention != nil { + in, out := 
&in.Retention, &out.Retention + *out = make([]RetentionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Scheduling != nil { + in, out := &in.Scheduling, &out.Scheduling + *out = make([]SchedulingParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SilentModeEnabled != nil { + in, out := &in.SilentModeEnabled, &out.SilentModeEnabled + *out = new(bool) + **out = **in + } + if in.SplittingBytes != nil { + in, out := &in.SplittingBytes, &out.SplittingBytes + *out = new(string) + **out = **in + } + if in.VMSnapshotReattempts != nil { + in, out := &in.VMSnapshotReattempts, &out.VMSnapshotReattempts + *out = make([]VMSnapshotReattemptsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VssProvider != nil { + in, out := &in.VssProvider, &out.VssProvider + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyParameters. +func (in *PolicyParameters) DeepCopy() *PolicyParameters { + if in == nil { + return nil + } + out := new(PolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicySpec) DeepCopyInto(out *PolicySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicySpec. +func (in *PolicySpec) DeepCopy() *PolicySpec { + if in == nil { + return nil + } + out := new(PolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyStatus) DeepCopyInto(out *PolicyStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyStatus. +func (in *PolicyStatus) DeepCopy() *PolicyStatus { + if in == nil { + return nil + } + out := new(PolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReattemptsInitParameters) DeepCopyInto(out *ReattemptsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.MaxAttempts != nil { + in, out := &in.MaxAttempts, &out.MaxAttempts + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReattemptsInitParameters. +func (in *ReattemptsInitParameters) DeepCopy() *ReattemptsInitParameters { + if in == nil { + return nil + } + out := new(ReattemptsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReattemptsObservation) DeepCopyInto(out *ReattemptsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.MaxAttempts != nil { + in, out := &in.MaxAttempts, &out.MaxAttempts + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReattemptsObservation. 
+func (in *ReattemptsObservation) DeepCopy() *ReattemptsObservation { + if in == nil { + return nil + } + out := new(ReattemptsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReattemptsParameters) DeepCopyInto(out *ReattemptsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.MaxAttempts != nil { + in, out := &in.MaxAttempts, &out.MaxAttempts + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReattemptsParameters. +func (in *ReattemptsParameters) DeepCopy() *ReattemptsParameters { + if in == nil { + return nil + } + out := new(ReattemptsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetentionInitParameters) DeepCopyInto(out *RetentionInitParameters) { + *out = *in + if in.AfterBackup != nil { + in, out := &in.AfterBackup, &out.AfterBackup + *out = new(bool) + **out = **in + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RulesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionInitParameters. +func (in *RetentionInitParameters) DeepCopy() *RetentionInitParameters { + if in == nil { + return nil + } + out := new(RetentionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RetentionObservation) DeepCopyInto(out *RetentionObservation) { + *out = *in + if in.AfterBackup != nil { + in, out := &in.AfterBackup, &out.AfterBackup + *out = new(bool) + **out = **in + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RulesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionObservation. +func (in *RetentionObservation) DeepCopy() *RetentionObservation { + if in == nil { + return nil + } + out := new(RetentionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetentionParameters) DeepCopyInto(out *RetentionParameters) { + *out = *in + if in.AfterBackup != nil { + in, out := &in.AfterBackup, &out.AfterBackup + *out = new(bool) + **out = **in + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RulesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionParameters. +func (in *RetentionParameters) DeepCopy() *RetentionParameters { + if in == nil { + return nil + } + out := new(RetentionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RulesInitParameters) DeepCopyInto(out *RulesInitParameters) { + *out = *in + if in.MaxAge != nil { + in, out := &in.MaxAge, &out.MaxAge + *out = new(string) + **out = **in + } + if in.MaxCount != nil { + in, out := &in.MaxCount, &out.MaxCount + *out = new(float64) + **out = **in + } + if in.RepeatPeriod != nil { + in, out := &in.RepeatPeriod, &out.RepeatPeriod + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RulesInitParameters. +func (in *RulesInitParameters) DeepCopy() *RulesInitParameters { + if in == nil { + return nil + } + out := new(RulesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RulesObservation) DeepCopyInto(out *RulesObservation) { + *out = *in + if in.MaxAge != nil { + in, out := &in.MaxAge, &out.MaxAge + *out = new(string) + **out = **in + } + if in.MaxCount != nil { + in, out := &in.MaxCount, &out.MaxCount + *out = new(float64) + **out = **in + } + if in.RepeatPeriod != nil { + in, out := &in.RepeatPeriod, &out.RepeatPeriod + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RulesObservation. +func (in *RulesObservation) DeepCopy() *RulesObservation { + if in == nil { + return nil + } + out := new(RulesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RulesParameters) DeepCopyInto(out *RulesParameters) { + *out = *in + if in.MaxAge != nil { + in, out := &in.MaxAge, &out.MaxAge + *out = new(string) + **out = **in + } + if in.MaxCount != nil { + in, out := &in.MaxCount, &out.MaxCount + *out = new(float64) + **out = **in + } + if in.RepeatPeriod != nil { + in, out := &in.RepeatPeriod, &out.RepeatPeriod + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RulesParameters. +func (in *RulesParameters) DeepCopy() *RulesParameters { + if in == nil { + return nil + } + out := new(RulesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulingInitParameters) DeepCopyInto(out *SchedulingInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ExecuteByInterval != nil { + in, out := &in.ExecuteByInterval, &out.ExecuteByInterval + *out = new(float64) + **out = **in + } + if in.ExecuteByTime != nil { + in, out := &in.ExecuteByTime, &out.ExecuteByTime + *out = make([]ExecuteByTimeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaxParallelBackups != nil { + in, out := &in.MaxParallelBackups, &out.MaxParallelBackups + *out = new(float64) + **out = **in + } + if in.RandomMaxDelay != nil { + in, out := &in.RandomMaxDelay, &out.RandomMaxDelay + *out = new(string) + **out = **in + } + if in.Scheme != nil { + in, out := &in.Scheme, &out.Scheme + *out = new(string) + **out = **in + } + if in.WeeklyBackupDay != nil { + in, out := &in.WeeklyBackupDay, &out.WeeklyBackupDay + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy 
function, copying the receiver, creating a new SchedulingInitParameters. +func (in *SchedulingInitParameters) DeepCopy() *SchedulingInitParameters { + if in == nil { + return nil + } + out := new(SchedulingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulingObservation) DeepCopyInto(out *SchedulingObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ExecuteByInterval != nil { + in, out := &in.ExecuteByInterval, &out.ExecuteByInterval + *out = new(float64) + **out = **in + } + if in.ExecuteByTime != nil { + in, out := &in.ExecuteByTime, &out.ExecuteByTime + *out = make([]ExecuteByTimeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaxParallelBackups != nil { + in, out := &in.MaxParallelBackups, &out.MaxParallelBackups + *out = new(float64) + **out = **in + } + if in.RandomMaxDelay != nil { + in, out := &in.RandomMaxDelay, &out.RandomMaxDelay + *out = new(string) + **out = **in + } + if in.Scheme != nil { + in, out := &in.Scheme, &out.Scheme + *out = new(string) + **out = **in + } + if in.WeeklyBackupDay != nil { + in, out := &in.WeeklyBackupDay, &out.WeeklyBackupDay + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulingObservation. +func (in *SchedulingObservation) DeepCopy() *SchedulingObservation { + if in == nil { + return nil + } + out := new(SchedulingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SchedulingParameters) DeepCopyInto(out *SchedulingParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ExecuteByInterval != nil { + in, out := &in.ExecuteByInterval, &out.ExecuteByInterval + *out = new(float64) + **out = **in + } + if in.ExecuteByTime != nil { + in, out := &in.ExecuteByTime, &out.ExecuteByTime + *out = make([]ExecuteByTimeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaxParallelBackups != nil { + in, out := &in.MaxParallelBackups, &out.MaxParallelBackups + *out = new(float64) + **out = **in + } + if in.RandomMaxDelay != nil { + in, out := &in.RandomMaxDelay, &out.RandomMaxDelay + *out = new(string) + **out = **in + } + if in.Scheme != nil { + in, out := &in.Scheme, &out.Scheme + *out = new(string) + **out = **in + } + if in.WeeklyBackupDay != nil { + in, out := &in.WeeklyBackupDay, &out.WeeklyBackupDay + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulingParameters. +func (in *SchedulingParameters) DeepCopy() *SchedulingParameters { + if in == nil { + return nil + } + out := new(SchedulingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VMSnapshotReattemptsInitParameters) DeepCopyInto(out *VMSnapshotReattemptsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.MaxAttempts != nil { + in, out := &in.MaxAttempts, &out.MaxAttempts + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VMSnapshotReattemptsInitParameters. 
+func (in *VMSnapshotReattemptsInitParameters) DeepCopy() *VMSnapshotReattemptsInitParameters { + if in == nil { + return nil + } + out := new(VMSnapshotReattemptsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VMSnapshotReattemptsObservation) DeepCopyInto(out *VMSnapshotReattemptsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.MaxAttempts != nil { + in, out := &in.MaxAttempts, &out.MaxAttempts + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VMSnapshotReattemptsObservation. +func (in *VMSnapshotReattemptsObservation) DeepCopy() *VMSnapshotReattemptsObservation { + if in == nil { + return nil + } + out := new(VMSnapshotReattemptsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VMSnapshotReattemptsParameters) DeepCopyInto(out *VMSnapshotReattemptsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.MaxAttempts != nil { + in, out := &in.MaxAttempts, &out.MaxAttempts + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VMSnapshotReattemptsParameters. 
+func (in *VMSnapshotReattemptsParameters) DeepCopy() *VMSnapshotReattemptsParameters { + if in == nil { + return nil + } + out := new(VMSnapshotReattemptsParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/backup/v1alpha1/zz_generated.managed.go b/apis/backup/v1alpha1/zz_generated.managed.go new file mode 100644 index 0000000..0f27a8d --- /dev/null +++ b/apis/backup/v1alpha1/zz_generated.managed.go @@ -0,0 +1,65 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Policy. +func (mg *Policy) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Policy. +func (mg *Policy) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Policy. +func (mg *Policy) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Policy. +func (mg *Policy) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Policy. +func (mg *Policy) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Policy. +func (mg *Policy) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Policy. +func (mg *Policy) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Policy. +func (mg *Policy) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Policy. 
+func (mg *Policy) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Policy. +func (mg *Policy) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Policy. +func (mg *Policy) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Policy. +func (mg *Policy) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/backup/v1alpha1/zz_generated.managedlist.go b/apis/backup/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 0000000..cbc784b --- /dev/null +++ b/apis/backup/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,14 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this PolicyList. +func (l *PolicyList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/backup/v1alpha1/zz_generated.resolvers.go b/apis/backup/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000..e01964e --- /dev/null +++ b/apis/backup/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,53 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this Policy. 
+func (mg *Policy) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/backup/v1alpha1/zz_groupversion_info.go b/apis/backup/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..e41e135 --- /dev/null +++ b/apis/backup/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,28 @@ +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=backup.yandex-cloud.upjet.crossplane.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "backup.yandex-cloud.upjet.crossplane.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/backup/v1alpha1/zz_policy_terraformed.go b/apis/backup/v1alpha1/zz_policy_terraformed.go new file mode 100755 index 0000000..2ce4736 --- /dev/null +++ b/apis/backup/v1alpha1/zz_policy_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Policy +func (mg *Policy) GetTerraformResourceType() string { + return "yandex_backup_policy" +} + +// GetConnectionDetailsMapping for this Policy +func (tr *Policy) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Policy +func (tr *Policy) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Policy +func (tr *Policy) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Policy +func (tr *Policy) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// 
GetParameters of this Policy
+func (tr *Policy) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this Policy
+func (tr *Policy) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this Policy
+func (tr *Policy) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Policy merges spec.initProvider into
+// spec.forProvider (when shouldMergeInitProvider is true) and returns the
+// combined Terraform parameters. forProvider values take precedence.
+func (tr *Policy) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource %q", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource %q", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource %q", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Policy using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *Policy) LateInitialize(attrs []byte) (bool, error) { + params := &PolicyParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Policy) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/backup/v1alpha1/zz_policy_types.go b/apis/backup/v1alpha1/zz_policy_types.go new file mode 100755 index 0000000..dc6165e --- /dev/null +++ b/apis/backup/v1alpha1/zz_policy_types.go @@ -0,0 +1,633 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ExecuteByTimeInitParameters struct { + + // — If true, schedule will be applied on the last day of month. + // See day_type for available values. + IncludeLastDayOfMonth *bool `json:"includeLastDayOfMonth,omitempty" tf:"include_last_day_of_month,omitempty"` + + // — List of days when schedule applies. Used in "MONTHLY" type. + Monthdays []*float64 `json:"monthdays,omitempty" tf:"monthdays,omitempty"` + + // — seconds + Months []*float64 `json:"months,omitempty" tf:"months,omitempty"` + + // hours format), when the schedule applies. + RepeatAt []*string `json:"repeatAt,omitempty" tf:"repeat_at,omitempty"` + + // — Frequency of backup repetition. See interval_type for available values. + RepeatEvery *string `json:"repeatEvery,omitempty" tf:"repeat_every,omitempty"` + + // — Type of the scheduling. Available values are: "HOURLY", "DAILY", "WEEKLY", "MONTHLY". 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // — List of weekdays when the backup will be applied. Used in "WEEKLY" type. + Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"` +} + +type ExecuteByTimeObservation struct { + + // — If true, schedule will be applied on the last day of month. + // See day_type for available values. + IncludeLastDayOfMonth *bool `json:"includeLastDayOfMonth,omitempty" tf:"include_last_day_of_month,omitempty"` + + // — List of days when schedule applies. Used in "MONTHLY" type. + Monthdays []*float64 `json:"monthdays,omitempty" tf:"monthdays,omitempty"` + + // — seconds + Months []*float64 `json:"months,omitempty" tf:"months,omitempty"` + + // hours format), when the schedule applies. + RepeatAt []*string `json:"repeatAt,omitempty" tf:"repeat_at,omitempty"` + + // — Frequency of backup repetition. See interval_type for available values. + RepeatEvery *string `json:"repeatEvery,omitempty" tf:"repeat_every,omitempty"` + + // — Type of the scheduling. Available values are: "HOURLY", "DAILY", "WEEKLY", "MONTHLY". + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // — List of weekdays when the backup will be applied. Used in "WEEKLY" type. + Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"` +} + +type ExecuteByTimeParameters struct { + + // — If true, schedule will be applied on the last day of month. + // See day_type for available values. + // +kubebuilder:validation:Optional + IncludeLastDayOfMonth *bool `json:"includeLastDayOfMonth,omitempty" tf:"include_last_day_of_month,omitempty"` + + // — List of days when schedule applies. Used in "MONTHLY" type. + // +kubebuilder:validation:Optional + Monthdays []*float64 `json:"monthdays,omitempty" tf:"monthdays,omitempty"` + + // — seconds + // +kubebuilder:validation:Optional + Months []*float64 `json:"months,omitempty" tf:"months,omitempty"` + + // hours format), when the schedule applies. 
+	// +kubebuilder:validation:Optional
+	RepeatAt []*string `json:"repeatAt,omitempty" tf:"repeat_at,omitempty"`
+
+	// — Frequency of backup repetition. See interval_type for available values.
+	// +kubebuilder:validation:Optional
+	RepeatEvery *string `json:"repeatEvery,omitempty" tf:"repeat_every,omitempty"`
+
+	// — Type of the scheduling. Available values are: "HOURLY", "DAILY", "WEEKLY", "MONTHLY".
+	// +kubebuilder:validation:Optional
+	Type *string `json:"type" tf:"type,omitempty"`
+
+	// — List of weekdays when the backup will be applied. Used in "WEEKLY" type.
+	// +kubebuilder:validation:Optional
+	Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"`
+}
+
+type PolicyInitParameters struct {
+
+	// [Plan ID]-[Unique ID]a) — The name of generated archives.
+	ArchiveName *string `json:"archiveName,omitempty" tf:"archive_name,omitempty"`
+
+	// — Configuration of Changed Block Tracking.
+	// Available values are: "USE_IF_ENABLED", "ENABLED_AND_USE", "DO_NOT_USE".
+	Cbt *string `json:"cbt,omitempty" tf:"cbt,omitempty"`
+
+	// — Archive compression level. Affects CPU.
+	// Available values: "NORMAL", "HIGH", "MAX", "OFF".
+	Compression *string `json:"compression,omitempty" tf:"compression,omitempty"`
+
+	// — Enable flag
+	FastBackupEnabled *bool `json:"fastBackupEnabled,omitempty" tf:"fast_backup_enabled,omitempty"`
+
+	// — ID of the folder the policy belongs to. (NOTE(review): generated doc said "— days", which looks like a docs-extraction artifact; confirm against provider docs.)
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+
+	// — Format of the backup. It's strongly recommended to leave this option empty or "AUTO".
+	// Available values: "AUTO", "VERSION_11", "VERSION_12".
+	Format *string `json:"format,omitempty" tf:"format,omitempty"`
+
+	// — If true, snapshots of multiple volumes will be taken simultaneously.
+	MultiVolumeSnapshottingEnabled *bool `json:"multiVolumeSnapshottingEnabled,omitempty" tf:"multi_volume_snapshotting_enabled,omitempty"`
+
+	// — Name of the policy
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// — Time windows for performance limitations of backup.
+	PerformanceWindowEnabled *bool `json:"performanceWindowEnabled,omitempty" tf:"performance_window_enabled,omitempty"`
+
+	// — Preserves file security settings. It's better to set this option to true.
+	PreserveFileSecuritySettings *bool `json:"preserveFileSecuritySettings,omitempty" tf:"preserve_file_security_settings,omitempty"`
+
+	// — If true, a quiesced snapshot of the virtual machine will be taken.
+	QuiesceSnapshottingEnabled *bool `json:"quiesceSnapshottingEnabled,omitempty" tf:"quiesce_snapshotting_enabled,omitempty"`
+
+	// — Amount of reattempts that should be performed while trying to make backup at the host.
+	// This attribute consists of the following parameters:
+	Reattempts []ReattemptsInitParameters `json:"reattempts,omitempty" tf:"reattempts,omitempty"`
+
+	// — Retention policy for backups. Allows to setup backups lifecycle.
+	// This attribute consists of the following parameters:
+	Retention []RetentionInitParameters `json:"retention,omitempty" tf:"retention,omitempty"`
+
+	// — Schedule settings for creating backups on the host.
+	Scheduling []SchedulingInitParameters `json:"scheduling,omitempty" tf:"scheduling,omitempty"`
+
+	// — if true, a user interaction will be avoided when possible.
+	SilentModeEnabled *bool `json:"silentModeEnabled,omitempty" tf:"silent_mode_enabled,omitempty"`
+
+	// — determines the size to split backups. It's better to leave this option unchanged.
+	SplittingBytes *string `json:"splittingBytes,omitempty" tf:"splitting_bytes,omitempty"`
+
+	// (Required) — Amount of reattempts that should be performed while trying to make snapshot.
+	// This attribute consists of the following parameters:
+	VMSnapshotReattempts []VMSnapshotReattemptsInitParameters `json:"vmSnapshotReattempts,omitempty" tf:"vm_snapshot_reattempts,omitempty"`
+
+	// — Settings for the volume shadow copy service.
+	// Available values are: "NATIVE", "TARGET_SYSTEM_DEFINED"
+	VssProvider *string `json:"vssProvider,omitempty" tf:"vss_provider,omitempty"`
+}
+
+type PolicyObservation struct {
+
+	// [Plan ID]-[Unique ID]a) — The name of generated archives.
+	ArchiveName *string `json:"archiveName,omitempty" tf:"archive_name,omitempty"`
+
+	// — Configuration of Changed Block Tracking.
+	// Available values are: "USE_IF_ENABLED", "ENABLED_AND_USE", "DO_NOT_USE".
+	Cbt *string `json:"cbt,omitempty" tf:"cbt,omitempty"`
+
+	// — Archive compression level. Affects CPU.
+	// Available values: "NORMAL", "HIGH", "MAX", "OFF".
+	Compression *string `json:"compression,omitempty" tf:"compression,omitempty"`
+
+	CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`
+
+	// — Enable flag
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+
+	// — Enable flag
+	FastBackupEnabled *bool `json:"fastBackupEnabled,omitempty" tf:"fast_backup_enabled,omitempty"`
+
+	// — ID of the folder the policy belongs to. (NOTE(review): generated doc said "— days", which looks like a docs-extraction artifact; confirm against provider docs.)
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+	// — Format of the backup. It's strongly recommended to leave this option empty or "AUTO".
+	// Available values: "AUTO", "VERSION_11", "VERSION_12".
+	Format *string `json:"format,omitempty" tf:"format,omitempty"`
+
+	// — ID of the resource. (NOTE(review): generated doc said "— days", which looks like a docs-extraction artifact.)
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+	// — If true, snapshots of multiple volumes will be taken simultaneously.
+	MultiVolumeSnapshottingEnabled *bool `json:"multiVolumeSnapshottingEnabled,omitempty" tf:"multi_volume_snapshotting_enabled,omitempty"`
+
+	// — Name of the policy
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// — Time windows for performance limitations of backup.
+	PerformanceWindowEnabled *bool `json:"performanceWindowEnabled,omitempty" tf:"performance_window_enabled,omitempty"`
+
+	// — Preserves file security settings. It's better to set this option to true.
+	PreserveFileSecuritySettings *bool `json:"preserveFileSecuritySettings,omitempty" tf:"preserve_file_security_settings,omitempty"`
+
+	// — If true, a quiesced snapshot of the virtual machine will be taken.
+	QuiesceSnapshottingEnabled *bool `json:"quiesceSnapshottingEnabled,omitempty" tf:"quiesce_snapshotting_enabled,omitempty"`
+
+	// — Amount of reattempts that should be performed while trying to make backup at the host.
+	// This attribute consists of the following parameters:
+	Reattempts []ReattemptsObservation `json:"reattempts,omitempty" tf:"reattempts,omitempty"`
+
+	// — Retention policy for backups. Allows to setup backups lifecycle.
+	// This attribute consists of the following parameters:
+	Retention []RetentionObservation `json:"retention,omitempty" tf:"retention,omitempty"`
+
+	// — Schedule settings for creating backups on the host.
+	Scheduling []SchedulingObservation `json:"scheduling,omitempty" tf:"scheduling,omitempty"`
+
+	// — if true, a user interaction will be avoided when possible.
+	SilentModeEnabled *bool `json:"silentModeEnabled,omitempty" tf:"silent_mode_enabled,omitempty"`
+
+	// — determines the size to split backups. It's better to leave this option unchanged.
+	SplittingBytes *string `json:"splittingBytes,omitempty" tf:"splitting_bytes,omitempty"`
+
+	UpdatedAt *string `json:"updatedAt,omitempty" tf:"updated_at,omitempty"`
+
+	// (Required) — Amount of reattempts that should be performed while trying to make snapshot.
+	// This attribute consists of the following parameters:
+	VMSnapshotReattempts []VMSnapshotReattemptsObservation `json:"vmSnapshotReattempts,omitempty" tf:"vm_snapshot_reattempts,omitempty"`
+
+	// — Settings for the volume shadow copy service.
+	// Available values are: "NATIVE", "TARGET_SYSTEM_DEFINED"
+	VssProvider *string `json:"vssProvider,omitempty" tf:"vss_provider,omitempty"`
+}
+
+type PolicyParameters struct {
+
+	// [Plan ID]-[Unique ID]a) — The name of generated archives.
+	// +kubebuilder:validation:Optional
+	ArchiveName *string `json:"archiveName,omitempty" tf:"archive_name,omitempty"`
+
+	// — Configuration of Changed Block Tracking.
+	// Available values are: "USE_IF_ENABLED", "ENABLED_AND_USE", "DO_NOT_USE".
+	// +kubebuilder:validation:Optional
+	Cbt *string `json:"cbt,omitempty" tf:"cbt,omitempty"`
+
+	// — Archive compression level. Affects CPU.
+	// Available values: "NORMAL", "HIGH", "MAX", "OFF".
+	// +kubebuilder:validation:Optional
+	Compression *string `json:"compression,omitempty" tf:"compression,omitempty"`
+
+	// — Enable flag
+	// +kubebuilder:validation:Optional
+	FastBackupEnabled *bool `json:"fastBackupEnabled,omitempty" tf:"fast_backup_enabled,omitempty"`
+
+	// — ID of the folder the policy belongs to. (NOTE(review): generated doc said "— days", which looks like a docs-extraction artifact; confirm against provider docs.)
+	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
+	// +kubebuilder:validation:Optional
+	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`
+
+	// Reference to a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`
+
+	// Selector for a Folder in resourcemanager to populate folderId.
+	// +kubebuilder:validation:Optional
+	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`
+
+	// — Format of the backup. It's strongly recommended to leave this option empty or "AUTO".
+	// Available values: "AUTO", "VERSION_11", "VERSION_12".
+	// +kubebuilder:validation:Optional
+	Format *string `json:"format,omitempty" tf:"format,omitempty"`
+
+	// — If true, snapshots of multiple volumes will be taken simultaneously.
+	// +kubebuilder:validation:Optional
+	MultiVolumeSnapshottingEnabled *bool `json:"multiVolumeSnapshottingEnabled,omitempty" tf:"multi_volume_snapshotting_enabled,omitempty"`
+
+	// — Name of the policy
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// — Time windows for performance limitations of backup.
+	// +kubebuilder:validation:Optional
+	PerformanceWindowEnabled *bool `json:"performanceWindowEnabled,omitempty" tf:"performance_window_enabled,omitempty"`
+
+	// — Preserves file security settings. It's better to set this option to true.
+	// +kubebuilder:validation:Optional
+	PreserveFileSecuritySettings *bool `json:"preserveFileSecuritySettings,omitempty" tf:"preserve_file_security_settings,omitempty"`
+
+	// — If true, a quiesced snapshot of the virtual machine will be taken.
+	// +kubebuilder:validation:Optional
+	QuiesceSnapshottingEnabled *bool `json:"quiesceSnapshottingEnabled,omitempty" tf:"quiesce_snapshotting_enabled,omitempty"`
+
+	// — Amount of reattempts that should be performed while trying to make backup at the host.
+	// This attribute consists of the following parameters:
+	// +kubebuilder:validation:Optional
+	Reattempts []ReattemptsParameters `json:"reattempts,omitempty" tf:"reattempts,omitempty"`
+
+	// — Retention policy for backups. Allows to setup backups lifecycle.
+	// This attribute consists of the following parameters:
+	// +kubebuilder:validation:Optional
+	Retention []RetentionParameters `json:"retention,omitempty" tf:"retention,omitempty"`
+
+	// — Schedule settings for creating backups on the host.
+	// +kubebuilder:validation:Optional
+	Scheduling []SchedulingParameters `json:"scheduling,omitempty" tf:"scheduling,omitempty"`
+
+	// — if true, a user interaction will be avoided when possible.
+	// +kubebuilder:validation:Optional
+	SilentModeEnabled *bool `json:"silentModeEnabled,omitempty" tf:"silent_mode_enabled,omitempty"`
+
+	// — determines the size to split backups. It's better to leave this option unchanged.
+	// +kubebuilder:validation:Optional
+	SplittingBytes *string `json:"splittingBytes,omitempty" tf:"splitting_bytes,omitempty"`
+
+	// (Required) — Amount of reattempts that should be performed while trying to make snapshot.
+	// This attribute consists of the following parameters:
+	// +kubebuilder:validation:Optional
+	VMSnapshotReattempts []VMSnapshotReattemptsParameters `json:"vmSnapshotReattempts,omitempty" tf:"vm_snapshot_reattempts,omitempty"`
+
+	// — Settings for the volume shadow copy service.
+	// Available values are: "NATIVE", "TARGET_SYSTEM_DEFINED"
+	// +kubebuilder:validation:Optional
+	VssProvider *string `json:"vssProvider,omitempty" tf:"vss_provider,omitempty"`
+}
+
+type ReattemptsInitParameters struct {
+
+	// — Enable flag
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+
+	// — Retry interval. See interval_type for available values
+	Interval *string `json:"interval,omitempty" tf:"interval,omitempty"`
+
+	// — Maximum number of attempts before throwing an error
+	MaxAttempts *float64 `json:"maxAttempts,omitempty" tf:"max_attempts,omitempty"`
+}
+
+type ReattemptsObservation struct {
+
+	// — Enable flag
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+
+	// — Retry interval. See interval_type for available values
+	Interval *string `json:"interval,omitempty" tf:"interval,omitempty"`
+
+	// — Maximum number of attempts before throwing an error
+	MaxAttempts *float64 `json:"maxAttempts,omitempty" tf:"max_attempts,omitempty"`
+}
+
+type ReattemptsParameters struct {
+
+	// — Enable flag
+	// +kubebuilder:validation:Optional
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+
+	// — Retry interval. See interval_type for available values
+	// +kubebuilder:validation:Optional
+	Interval *string `json:"interval,omitempty" tf:"interval,omitempty"`
+
+	// — Maximum number of attempts before throwing an error
+	// +kubebuilder:validation:Optional
+	MaxAttempts *float64 `json:"maxAttempts,omitempty" tf:"max_attempts,omitempty"`
+}
+
+type RetentionInitParameters struct {
+
+	// — Defines whether retention rule applies after creating backup or before.
+	AfterBackup *bool `json:"afterBackup,omitempty" tf:"after_backup,omitempty"`
+
+	// — Retention rules. (NOTE(review): generated doc said "— seconds", which looks like a docs-extraction artifact; confirm against provider docs.)
+	Rules []RulesInitParameters `json:"rules,omitempty" tf:"rules,omitempty"`
+}
+
+type RetentionObservation struct {
+
+	// — Defines whether retention rule applies after creating backup or before.
+	AfterBackup *bool `json:"afterBackup,omitempty" tf:"after_backup,omitempty"`
+
+	// — Retention rules. (NOTE(review): generated doc said "— seconds", which looks like a docs-extraction artifact; confirm against provider docs.)
+	Rules []RulesObservation `json:"rules,omitempty" tf:"rules,omitempty"`
+}
+
+type RetentionParameters struct {
+
+	// — Defines whether retention rule applies after creating backup or before.
+	// +kubebuilder:validation:Optional
+	AfterBackup *bool `json:"afterBackup,omitempty" tf:"after_backup,omitempty"`
+
+	// — Retention rules. (NOTE(review): generated doc said "— seconds", which looks like a docs-extraction artifact; confirm against provider docs.)
+	// +kubebuilder:validation:Optional
+	Rules []RulesParameters `json:"rules,omitempty" tf:"rules,omitempty"`
+}
+
+type RulesInitParameters struct {
+
+	// (Conflicts with max_count) — Deletes backups that are older than max_age. Exactly one of max_count or max_age should be set.
+	MaxAge *string `json:"maxAge,omitempty" tf:"max_age,omitempty"`
+
+	// (Conflicts with max_age) — Deletes backups if their count exceeds max_count. Exactly one of max_count or max_age should be set.
+	MaxCount *float64 `json:"maxCount,omitempty" tf:"max_count,omitempty"`
+
+	// — Repeat period. (NOTE(review): generated doc said "— days", which looks like a docs-extraction artifact; confirm against provider docs.)
+	RepeatPeriod []*string `json:"repeatPeriod,omitempty" tf:"repeat_period,omitempty"`
+}
+
+type RulesObservation struct {
+
+	// (Conflicts with max_count) — Deletes backups that are older than max_age. Exactly one of max_count or max_age should be set.
+	MaxAge *string `json:"maxAge,omitempty" tf:"max_age,omitempty"`
+
+	// (Conflicts with max_age) — Deletes backups if their count exceeds max_count. Exactly one of max_count or max_age should be set.
+	MaxCount *float64 `json:"maxCount,omitempty" tf:"max_count,omitempty"`
+
+	// — Repeat period. (NOTE(review): generated doc said "— days", which looks like a docs-extraction artifact; confirm against provider docs.)
+	RepeatPeriod []*string `json:"repeatPeriod,omitempty" tf:"repeat_period,omitempty"`
+}
+
+type RulesParameters struct {
+
+	// (Conflicts with max_count) — Deletes backups that are older than max_age. Exactly one of max_count or max_age should be set.
+	// +kubebuilder:validation:Optional
+	MaxAge *string `json:"maxAge,omitempty" tf:"max_age,omitempty"`
+
+	// (Conflicts with max_age) — Deletes backups if their count exceeds max_count. Exactly one of max_count or max_age should be set.
+	// +kubebuilder:validation:Optional
+	MaxCount *float64 `json:"maxCount,omitempty" tf:"max_count,omitempty"`
+
+	// — Repeat period. (NOTE(review): generated doc said "— days", which looks like a docs-extraction artifact; confirm against provider docs.)
+	// +kubebuilder:validation:Optional
+	RepeatPeriod []*string `json:"repeatPeriod,omitempty" tf:"repeat_period,omitempty"`
+}
+
+type SchedulingInitParameters struct {
+
+	// — Enable flag
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+
+	// — Perform backup by interval, since last backup of the host. Maximum value is: 9999 days.
+	// See interval_type for available values. Exactly one of the options should be set: execute_by_interval or execute_by_time.
+	ExecuteByInterval *float64 `json:"executeByInterval,omitempty" tf:"execute_by_interval,omitempty"`
+
+	// — Perform backup periodically at specific time. Exactly one of the options should be set: execute_by_interval or execute_by_time.
+	ExecuteByTime []ExecuteByTimeInitParameters `json:"executeByTime,omitempty" tf:"execute_by_time,omitempty"`
+
+	// — Maximum number of backup processes allowed to run in parallel. 0 for unlimited.
+	MaxParallelBackups *float64 `json:"maxParallelBackups,omitempty" tf:"max_parallel_backups,omitempty"`
+
+	// — Configuration of the random delay between the execution of parallel tasks.
+	// See interval_type for available values.
+	RandomMaxDelay *string `json:"randomMaxDelay,omitempty" tf:"random_max_delay,omitempty"`
+
+	// — Scheme of the backups.
+	// Available values are: "ALWAYS_INCREMENTAL", "ALWAYS_FULL", "WEEKLY_FULL_DAILY_INCREMENTAL", "WEEKLY_INCREMENTAL".
+	Scheme *string `json:"scheme,omitempty" tf:"scheme,omitempty"`
+
+	// — A day of week to start weekly backups.
+	// See day_type for available values.
+	WeeklyBackupDay *string `json:"weeklyBackupDay,omitempty" tf:"weekly_backup_day,omitempty"`
+}
+
+type SchedulingObservation struct {
+
+	// — Enable flag
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+
+	// — Perform backup by interval, since last backup of the host. Maximum value is: 9999 days.
+	// See interval_type for available values. Exactly one of the options should be set: execute_by_interval or execute_by_time.
+	ExecuteByInterval *float64 `json:"executeByInterval,omitempty" tf:"execute_by_interval,omitempty"`
+
+	// — Perform backup periodically at specific time. Exactly one of the options should be set: execute_by_interval or execute_by_time.
+	ExecuteByTime []ExecuteByTimeObservation `json:"executeByTime,omitempty" tf:"execute_by_time,omitempty"`
+
+	// — Maximum number of backup processes allowed to run in parallel. 0 for unlimited.
+	MaxParallelBackups *float64 `json:"maxParallelBackups,omitempty" tf:"max_parallel_backups,omitempty"`
+
+	// — Configuration of the random delay between the execution of parallel tasks.
+	// See interval_type for available values.
+	RandomMaxDelay *string `json:"randomMaxDelay,omitempty" tf:"random_max_delay,omitempty"`
+
+	// — Scheme of the backups.
+	// Available values are: "ALWAYS_INCREMENTAL", "ALWAYS_FULL", "WEEKLY_FULL_DAILY_INCREMENTAL", "WEEKLY_INCREMENTAL".
+	Scheme *string `json:"scheme,omitempty" tf:"scheme,omitempty"`
+
+	// — A day of week to start weekly backups.
+	// See day_type for available values.
+	WeeklyBackupDay *string `json:"weeklyBackupDay,omitempty" tf:"weekly_backup_day,omitempty"`
+}
+
+type SchedulingParameters struct {
+
+	// — Enable flag
+	// +kubebuilder:validation:Optional
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+
+	// — Perform backup by interval, since last backup of the host. Maximum value is: 9999 days.
+	// See interval_type for available values. Exactly one of the options should be set: execute_by_interval or execute_by_time.
+	// +kubebuilder:validation:Optional
+	ExecuteByInterval *float64 `json:"executeByInterval,omitempty" tf:"execute_by_interval,omitempty"`
+
+	// — Perform backup periodically at specific time. Exactly one of the options should be set: execute_by_interval or execute_by_time.
+	// +kubebuilder:validation:Optional
+	ExecuteByTime []ExecuteByTimeParameters `json:"executeByTime,omitempty" tf:"execute_by_time,omitempty"`
+
+	// — Maximum number of backup processes allowed to run in parallel. 0 for unlimited.
+	// +kubebuilder:validation:Optional
+	MaxParallelBackups *float64 `json:"maxParallelBackups,omitempty" tf:"max_parallel_backups,omitempty"`
+
+	// — Configuration of the random delay between the execution of parallel tasks.
+	// See interval_type for available values.
+	// +kubebuilder:validation:Optional
+	RandomMaxDelay *string `json:"randomMaxDelay,omitempty" tf:"random_max_delay,omitempty"`
+
+	// — Scheme of the backups.
+	// Available values are: "ALWAYS_INCREMENTAL", "ALWAYS_FULL", "WEEKLY_FULL_DAILY_INCREMENTAL", "WEEKLY_INCREMENTAL".
+	// +kubebuilder:validation:Optional
+	Scheme *string `json:"scheme,omitempty" tf:"scheme,omitempty"`
+
+	// — A day of week to start weekly backups.
+	// See day_type for available values.
+	// +kubebuilder:validation:Optional
+	WeeklyBackupDay *string `json:"weeklyBackupDay,omitempty" tf:"weekly_backup_day,omitempty"`
+}
+
+type VMSnapshotReattemptsInitParameters struct {
+
+	// — Enable flag
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+
+	// — Retry interval. See interval_type for available values
+	Interval *string `json:"interval,omitempty" tf:"interval,omitempty"`
+
+	// — Maximum number of attempts before throwing an error
+	MaxAttempts *float64 `json:"maxAttempts,omitempty" tf:"max_attempts,omitempty"`
+}
+
+type VMSnapshotReattemptsObservation struct {
+
+	// — Enable flag
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+
+	// — Retry interval. See interval_type for available values
+	Interval *string `json:"interval,omitempty" tf:"interval,omitempty"`
+
+	// — Maximum number of attempts before throwing an error
+	MaxAttempts *float64 `json:"maxAttempts,omitempty" tf:"max_attempts,omitempty"`
+}
+
+type VMSnapshotReattemptsParameters struct {
+
+	// — Enable flag
+	// +kubebuilder:validation:Optional
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+
+	// — Retry interval. See interval_type for available values
+	// +kubebuilder:validation:Optional
+	Interval *string `json:"interval,omitempty" tf:"interval,omitempty"`
+
+	// — Maximum number of attempts before throwing an error
+	// +kubebuilder:validation:Optional
+	MaxAttempts *float64 `json:"maxAttempts,omitempty" tf:"max_attempts,omitempty"`
+}
+
+// PolicySpec defines the desired state of Policy
+type PolicySpec struct {
+	v1.ResourceSpec `json:",inline"`
+	ForProvider     PolicyParameters `json:"forProvider"`
+	// THIS IS A BETA FIELD. It will be honored
+	// unless the Management Policies feature flag is disabled.
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not desire to update them after creation,
+	// for example because of an external controller is managing them, like an
+	// autoscaler.
+	InitProvider PolicyInitParameters `json:"initProvider,omitempty"`
+}
+
+// PolicyStatus defines the observed state of Policy.
+type PolicyStatus struct {
+	v1.ResourceStatus `json:",inline"`
+	AtProvider        PolicyObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+
+// Policy is the Schema for the Policies API. Allows management of Yandex.Cloud Backup Policy.
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
+// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud}
+type Policy struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter"
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.reattempts) || (has(self.initProvider) && has(self.initProvider.reattempts))",message="spec.forProvider.reattempts is a required parameter"
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.retention) || (has(self.initProvider) && has(self.initProvider.retention))",message="spec.forProvider.retention is a required parameter"
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.scheduling) || (has(self.initProvider) && has(self.initProvider.scheduling))",message="spec.forProvider.scheduling is a required parameter"
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.vmSnapshotReattempts) || (has(self.initProvider) && has(self.initProvider.vmSnapshotReattempts))",message="spec.forProvider.vmSnapshotReattempts is a required parameter"
+	Spec   PolicySpec   `json:"spec"`
+	Status PolicyStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// PolicyList contains a list of Policies
+type PolicyList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []Policy `json:"items"`
+}
+
+// Repository type metadata.
+var (
+	Policy_Kind             = "Policy"
+	Policy_GroupKind        = schema.GroupKind{Group: CRDGroup, Kind: Policy_Kind}.String()
+	Policy_KindAPIVersion   = Policy_Kind + "." + CRDGroupVersion.String()
+	Policy_GroupVersionKind = CRDGroupVersion.WithKind(Policy_Kind)
+)
+
+func init() {
+	SchemeBuilder.Register(&Policy{}, &PolicyList{})
+}
diff --git a/apis/billing/v1alpha1/zz_cloudbinding_terraformed.go b/apis/billing/v1alpha1/zz_cloudbinding_terraformed.go
new file mode 100755
index 0000000..cf80ab5
--- /dev/null
+++ b/apis/billing/v1alpha1/zz_cloudbinding_terraformed.go
@@ -0,0 +1,125 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this CloudBinding
+func (mg *CloudBinding) GetTerraformResourceType() string {
+	return "yandex_billing_cloud_binding"
+}
+
+// GetConnectionDetailsMapping for this CloudBinding
+func (tr *CloudBinding) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this CloudBinding
+func (tr *CloudBinding) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this CloudBinding
+func (tr *CloudBinding) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this CloudBinding
+func (tr *CloudBinding) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this CloudBinding
+func (tr *CloudBinding) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this CloudBinding
+func (tr *CloudBinding) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this CloudBinding
+func (tr *CloudBinding) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this CloudBinding
+func (tr *CloudBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this CloudBinding using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *CloudBinding) LateInitialize(attrs []byte) (bool, error) {
+	params := &CloudBindingParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *CloudBinding) GetTerraformSchemaVersion() int {
+	return 0
+}
diff --git a/apis/billing/v1alpha1/zz_cloudbinding_types.go b/apis/billing/v1alpha1/zz_cloudbinding_types.go
new file mode 100755
index 0000000..e8b60e6
--- /dev/null
+++ b/apis/billing/v1alpha1/zz_cloudbinding_types.go
@@ -0,0 +1,104 @@
+// Code generated by upjet. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type CloudBindingInitParameters struct {
+
+	// ID of billing account to bind cloud to.
+	BillingAccountID *string `json:"billingAccountId,omitempty" tf:"billing_account_id,omitempty"`
+
+	// ID of cloud to bind.
+	CloudID *string `json:"cloudId,omitempty" tf:"cloud_id,omitempty"`
+}
+
+type CloudBindingObservation struct {
+
+	// ID of billing account to bind cloud to.
+	BillingAccountID *string `json:"billingAccountId,omitempty" tf:"billing_account_id,omitempty"`
+
+	// ID of cloud to bind.
+	CloudID *string `json:"cloudId,omitempty" tf:"cloud_id,omitempty"`
+
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
+}
+
+type CloudBindingParameters struct {
+
+	// ID of billing account to bind cloud to.
+	// +kubebuilder:validation:Optional
+	BillingAccountID *string `json:"billingAccountId,omitempty" tf:"billing_account_id,omitempty"`
+
+	// ID of cloud to bind.
+	// +kubebuilder:validation:Optional
+	CloudID *string `json:"cloudId,omitempty" tf:"cloud_id,omitempty"`
+}
+
+// CloudBindingSpec defines the desired state of CloudBinding
+type CloudBindingSpec struct {
+	v1.ResourceSpec `json:",inline"`
+	ForProvider     CloudBindingParameters `json:"forProvider"`
+	// THIS IS A BETA FIELD. It will be honored
+	// unless the Management Policies feature flag is disabled.
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not desire to update them after creation,
+	// for example because of an external controller is managing them, like an
+	// autoscaler.
+	InitProvider CloudBindingInitParameters `json:"initProvider,omitempty"`
+}
+
+// CloudBindingStatus defines the observed state of CloudBinding.
+type CloudBindingStatus struct {
+	v1.ResourceStatus `json:",inline"`
+	AtProvider        CloudBindingObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+
+// CloudBinding is the Schema for the CloudBindings API. Bind cloud to billing account.
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type CloudBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.billingAccountId) || (has(self.initProvider) && has(self.initProvider.billingAccountId))",message="spec.forProvider.billingAccountId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.cloudId) || (has(self.initProvider) && has(self.initProvider.cloudId))",message="spec.forProvider.cloudId is a required parameter" + Spec CloudBindingSpec `json:"spec"` + Status CloudBindingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// CloudBindingList contains a list of CloudBindings +type CloudBindingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CloudBinding `json:"items"` +} + +// Repository type metadata. +var ( + CloudBinding_Kind = "CloudBinding" + CloudBinding_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: CloudBinding_Kind}.String() + CloudBinding_KindAPIVersion = CloudBinding_Kind + "." 
+ CRDGroupVersion.String() + CloudBinding_GroupVersionKind = CRDGroupVersion.WithKind(CloudBinding_Kind) +) + +func init() { + SchemeBuilder.Register(&CloudBinding{}, &CloudBindingList{}) +} diff --git a/apis/billing/v1alpha1/zz_generated.conversion_hubs.go b/apis/billing/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..a2f6b4c --- /dev/null +++ b/apis/billing/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,6 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +// Hub marks this type as a conversion hub. +func (tr *CloudBinding) Hub() {} diff --git a/apis/billing/v1alpha1/zz_generated.deepcopy.go b/apis/billing/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..073c2ea --- /dev/null +++ b/apis/billing/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,183 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudBinding) DeepCopyInto(out *CloudBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudBinding. +func (in *CloudBinding) DeepCopy() *CloudBinding { + if in == nil { + return nil + } + out := new(CloudBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CloudBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudBindingInitParameters) DeepCopyInto(out *CloudBindingInitParameters) { + *out = *in + if in.BillingAccountID != nil { + in, out := &in.BillingAccountID, &out.BillingAccountID + *out = new(string) + **out = **in + } + if in.CloudID != nil { + in, out := &in.CloudID, &out.CloudID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudBindingInitParameters. +func (in *CloudBindingInitParameters) DeepCopy() *CloudBindingInitParameters { + if in == nil { + return nil + } + out := new(CloudBindingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudBindingList) DeepCopyInto(out *CloudBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CloudBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudBindingList. +func (in *CloudBindingList) DeepCopy() *CloudBindingList { + if in == nil { + return nil + } + out := new(CloudBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CloudBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudBindingObservation) DeepCopyInto(out *CloudBindingObservation) { + *out = *in + if in.BillingAccountID != nil { + in, out := &in.BillingAccountID, &out.BillingAccountID + *out = new(string) + **out = **in + } + if in.CloudID != nil { + in, out := &in.CloudID, &out.CloudID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudBindingObservation. +func (in *CloudBindingObservation) DeepCopy() *CloudBindingObservation { + if in == nil { + return nil + } + out := new(CloudBindingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudBindingParameters) DeepCopyInto(out *CloudBindingParameters) { + *out = *in + if in.BillingAccountID != nil { + in, out := &in.BillingAccountID, &out.BillingAccountID + *out = new(string) + **out = **in + } + if in.CloudID != nil { + in, out := &in.CloudID, &out.CloudID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudBindingParameters. +func (in *CloudBindingParameters) DeepCopy() *CloudBindingParameters { + if in == nil { + return nil + } + out := new(CloudBindingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudBindingSpec) DeepCopyInto(out *CloudBindingSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudBindingSpec. 
+func (in *CloudBindingSpec) DeepCopy() *CloudBindingSpec { + if in == nil { + return nil + } + out := new(CloudBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudBindingStatus) DeepCopyInto(out *CloudBindingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudBindingStatus. +func (in *CloudBindingStatus) DeepCopy() *CloudBindingStatus { + if in == nil { + return nil + } + out := new(CloudBindingStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/billing/v1alpha1/zz_generated.managed.go b/apis/billing/v1alpha1/zz_generated.managed.go new file mode 100644 index 0000000..2f7b4ee --- /dev/null +++ b/apis/billing/v1alpha1/zz_generated.managed.go @@ -0,0 +1,65 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this CloudBinding. +func (mg *CloudBinding) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this CloudBinding. +func (mg *CloudBinding) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this CloudBinding. +func (mg *CloudBinding) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this CloudBinding. +func (mg *CloudBinding) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this CloudBinding. 
+func (mg *CloudBinding) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this CloudBinding. +func (mg *CloudBinding) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this CloudBinding. +func (mg *CloudBinding) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this CloudBinding. +func (mg *CloudBinding) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this CloudBinding. +func (mg *CloudBinding) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this CloudBinding. +func (mg *CloudBinding) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this CloudBinding. +func (mg *CloudBinding) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this CloudBinding. +func (mg *CloudBinding) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/billing/v1alpha1/zz_generated.managedlist.go b/apis/billing/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 0000000..930520a --- /dev/null +++ b/apis/billing/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,14 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this CloudBindingList. 
+func (l *CloudBindingList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/billing/v1alpha1/zz_groupversion_info.go b/apis/billing/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..9f2c087 --- /dev/null +++ b/apis/billing/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,28 @@ +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=billing.yandex-cloud.upjet.crossplane.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "billing.yandex-cloud.upjet.crossplane.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/cdn/v1alpha1/zz_generated.conversion_hubs.go b/apis/cdn/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..8304d38 --- /dev/null +++ b/apis/cdn/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,9 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +// Hub marks this type as a conversion hub. +func (tr *OriginGroup) Hub() {} + +// Hub marks this type as a conversion hub. 
+func (tr *Resource) Hub() {} diff --git a/apis/cdn/v1alpha1/zz_generated.deepcopy.go b/apis/cdn/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..898fe65 --- /dev/null +++ b/apis/cdn/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,1464 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAddressACLInitParameters) DeepCopyInto(out *IPAddressACLInitParameters) { + *out = *in + if in.ExceptedValues != nil { + in, out := &in.ExceptedValues, &out.ExceptedValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PolicyType != nil { + in, out := &in.PolicyType, &out.PolicyType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressACLInitParameters. +func (in *IPAddressACLInitParameters) DeepCopy() *IPAddressACLInitParameters { + if in == nil { + return nil + } + out := new(IPAddressACLInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPAddressACLObservation) DeepCopyInto(out *IPAddressACLObservation) { + *out = *in + if in.ExceptedValues != nil { + in, out := &in.ExceptedValues, &out.ExceptedValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PolicyType != nil { + in, out := &in.PolicyType, &out.PolicyType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressACLObservation. +func (in *IPAddressACLObservation) DeepCopy() *IPAddressACLObservation { + if in == nil { + return nil + } + out := new(IPAddressACLObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAddressACLParameters) DeepCopyInto(out *IPAddressACLParameters) { + *out = *in + if in.ExceptedValues != nil { + in, out := &in.ExceptedValues, &out.ExceptedValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PolicyType != nil { + in, out := &in.PolicyType, &out.PolicyType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressACLParameters. +func (in *IPAddressACLParameters) DeepCopy() *IPAddressACLParameters { + if in == nil { + return nil + } + out := new(IPAddressACLParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OptionsInitParameters) DeepCopyInto(out *OptionsInitParameters) { + *out = *in + if in.AllowedHTTPMethods != nil { + in, out := &in.AllowedHTTPMethods, &out.AllowedHTTPMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BrowserCacheSettings != nil { + in, out := &in.BrowserCacheSettings, &out.BrowserCacheSettings + *out = new(float64) + **out = **in + } + if in.CacheHTTPHeaders != nil { + in, out := &in.CacheHTTPHeaders, &out.CacheHTTPHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomHostHeader != nil { + in, out := &in.CustomHostHeader, &out.CustomHostHeader + *out = new(string) + **out = **in + } + if in.CustomServerName != nil { + in, out := &in.CustomServerName, &out.CustomServerName + *out = new(string) + **out = **in + } + if in.DisableCache != nil { + in, out := &in.DisableCache, &out.DisableCache + *out = new(bool) + **out = **in + } + if in.DisableProxyForceRanges != nil { + in, out := &in.DisableProxyForceRanges, &out.DisableProxyForceRanges + *out = new(bool) + **out = **in + } + if in.EdgeCacheSettings != nil { + in, out := &in.EdgeCacheSettings, &out.EdgeCacheSettings + *out = new(float64) + **out = **in + } + if in.EnableIPURLSigning != nil { + in, out := &in.EnableIPURLSigning, &out.EnableIPURLSigning + *out = new(bool) + **out = **in + } + if in.FetchedCompressed != nil { + in, out := &in.FetchedCompressed, &out.FetchedCompressed + *out = new(bool) + **out = **in + } + if in.ForwardHostHeader != nil { + in, out := &in.ForwardHostHeader, &out.ForwardHostHeader + 
*out = new(bool) + **out = **in + } + if in.GzipOn != nil { + in, out := &in.GzipOn, &out.GzipOn + *out = new(bool) + **out = **in + } + if in.IPAddressACL != nil { + in, out := &in.IPAddressACL, &out.IPAddressACL + *out = make([]IPAddressACLInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IgnoreCookie != nil { + in, out := &in.IgnoreCookie, &out.IgnoreCookie + *out = new(bool) + **out = **in + } + if in.IgnoreQueryParams != nil { + in, out := &in.IgnoreQueryParams, &out.IgnoreQueryParams + *out = new(bool) + **out = **in + } + if in.ProxyCacheMethodsSet != nil { + in, out := &in.ProxyCacheMethodsSet, &out.ProxyCacheMethodsSet + *out = new(bool) + **out = **in + } + if in.QueryParamsBlacklist != nil { + in, out := &in.QueryParamsBlacklist, &out.QueryParamsBlacklist + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.QueryParamsWhitelist != nil { + in, out := &in.QueryParamsWhitelist, &out.QueryParamsWhitelist + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RedirectHTTPSToHTTP != nil { + in, out := &in.RedirectHTTPSToHTTP, &out.RedirectHTTPSToHTTP + *out = new(bool) + **out = **in + } + if in.RedirectHTTPToHTTPS != nil { + in, out := &in.RedirectHTTPToHTTPS, &out.RedirectHTTPToHTTPS + *out = new(bool) + **out = **in + } + if in.SecureKey != nil { + in, out := &in.SecureKey, &out.SecureKey + *out = new(string) + **out = **in + } + if in.Slice != nil { + in, out := &in.Slice, &out.Slice + *out = new(bool) + **out = **in + } + if in.StaticRequestHeaders != nil { + in, out := &in.StaticRequestHeaders, &out.StaticRequestHeaders + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := 
(*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.StaticResponseHeaders != nil { + in, out := &in.StaticResponseHeaders, &out.StaticResponseHeaders + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionsInitParameters. +func (in *OptionsInitParameters) DeepCopy() *OptionsInitParameters { + if in == nil { + return nil + } + out := new(OptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OptionsObservation) DeepCopyInto(out *OptionsObservation) { + *out = *in + if in.AllowedHTTPMethods != nil { + in, out := &in.AllowedHTTPMethods, &out.AllowedHTTPMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BrowserCacheSettings != nil { + in, out := &in.BrowserCacheSettings, &out.BrowserCacheSettings + *out = new(float64) + **out = **in + } + if in.CacheHTTPHeaders != nil { + in, out := &in.CacheHTTPHeaders, &out.CacheHTTPHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomHostHeader != nil { + in, out := &in.CustomHostHeader, &out.CustomHostHeader + *out = new(string) + **out = **in + } + if 
in.CustomServerName != nil { + in, out := &in.CustomServerName, &out.CustomServerName + *out = new(string) + **out = **in + } + if in.DisableCache != nil { + in, out := &in.DisableCache, &out.DisableCache + *out = new(bool) + **out = **in + } + if in.DisableProxyForceRanges != nil { + in, out := &in.DisableProxyForceRanges, &out.DisableProxyForceRanges + *out = new(bool) + **out = **in + } + if in.EdgeCacheSettings != nil { + in, out := &in.EdgeCacheSettings, &out.EdgeCacheSettings + *out = new(float64) + **out = **in + } + if in.EnableIPURLSigning != nil { + in, out := &in.EnableIPURLSigning, &out.EnableIPURLSigning + *out = new(bool) + **out = **in + } + if in.FetchedCompressed != nil { + in, out := &in.FetchedCompressed, &out.FetchedCompressed + *out = new(bool) + **out = **in + } + if in.ForwardHostHeader != nil { + in, out := &in.ForwardHostHeader, &out.ForwardHostHeader + *out = new(bool) + **out = **in + } + if in.GzipOn != nil { + in, out := &in.GzipOn, &out.GzipOn + *out = new(bool) + **out = **in + } + if in.IPAddressACL != nil { + in, out := &in.IPAddressACL, &out.IPAddressACL + *out = make([]IPAddressACLObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IgnoreCookie != nil { + in, out := &in.IgnoreCookie, &out.IgnoreCookie + *out = new(bool) + **out = **in + } + if in.IgnoreQueryParams != nil { + in, out := &in.IgnoreQueryParams, &out.IgnoreQueryParams + *out = new(bool) + **out = **in + } + if in.ProxyCacheMethodsSet != nil { + in, out := &in.ProxyCacheMethodsSet, &out.ProxyCacheMethodsSet + *out = new(bool) + **out = **in + } + if in.QueryParamsBlacklist != nil { + in, out := &in.QueryParamsBlacklist, &out.QueryParamsBlacklist + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.QueryParamsWhitelist != nil { + in, out := &in.QueryParamsWhitelist, &out.QueryParamsWhitelist + *out = 
make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RedirectHTTPSToHTTP != nil { + in, out := &in.RedirectHTTPSToHTTP, &out.RedirectHTTPSToHTTP + *out = new(bool) + **out = **in + } + if in.RedirectHTTPToHTTPS != nil { + in, out := &in.RedirectHTTPToHTTPS, &out.RedirectHTTPToHTTPS + *out = new(bool) + **out = **in + } + if in.SecureKey != nil { + in, out := &in.SecureKey, &out.SecureKey + *out = new(string) + **out = **in + } + if in.Slice != nil { + in, out := &in.Slice, &out.Slice + *out = new(bool) + **out = **in + } + if in.StaticRequestHeaders != nil { + in, out := &in.StaticRequestHeaders, &out.StaticRequestHeaders + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.StaticResponseHeaders != nil { + in, out := &in.StaticResponseHeaders, &out.StaticResponseHeaders + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionsObservation. +func (in *OptionsObservation) DeepCopy() *OptionsObservation { + if in == nil { + return nil + } + out := new(OptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OptionsParameters) DeepCopyInto(out *OptionsParameters) { + *out = *in + if in.AllowedHTTPMethods != nil { + in, out := &in.AllowedHTTPMethods, &out.AllowedHTTPMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BrowserCacheSettings != nil { + in, out := &in.BrowserCacheSettings, &out.BrowserCacheSettings + *out = new(float64) + **out = **in + } + if in.CacheHTTPHeaders != nil { + in, out := &in.CacheHTTPHeaders, &out.CacheHTTPHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomHostHeader != nil { + in, out := &in.CustomHostHeader, &out.CustomHostHeader + *out = new(string) + **out = **in + } + if in.CustomServerName != nil { + in, out := &in.CustomServerName, &out.CustomServerName + *out = new(string) + **out = **in + } + if in.DisableCache != nil { + in, out := &in.DisableCache, &out.DisableCache + *out = new(bool) + **out = **in + } + if in.DisableProxyForceRanges != nil { + in, out := &in.DisableProxyForceRanges, &out.DisableProxyForceRanges + *out = new(bool) + **out = **in + } + if in.EdgeCacheSettings != nil { + in, out := &in.EdgeCacheSettings, &out.EdgeCacheSettings + *out = new(float64) + **out = **in + } + if in.EnableIPURLSigning != nil { + in, out := &in.EnableIPURLSigning, &out.EnableIPURLSigning + *out = new(bool) + **out = **in + } + if in.FetchedCompressed != nil { + in, out := &in.FetchedCompressed, &out.FetchedCompressed + *out = new(bool) + **out = **in + } + if in.ForwardHostHeader != nil { + in, out := &in.ForwardHostHeader, &out.ForwardHostHeader + *out = 
new(bool) + **out = **in + } + if in.GzipOn != nil { + in, out := &in.GzipOn, &out.GzipOn + *out = new(bool) + **out = **in + } + if in.IPAddressACL != nil { + in, out := &in.IPAddressACL, &out.IPAddressACL + *out = make([]IPAddressACLParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IgnoreCookie != nil { + in, out := &in.IgnoreCookie, &out.IgnoreCookie + *out = new(bool) + **out = **in + } + if in.IgnoreQueryParams != nil { + in, out := &in.IgnoreQueryParams, &out.IgnoreQueryParams + *out = new(bool) + **out = **in + } + if in.ProxyCacheMethodsSet != nil { + in, out := &in.ProxyCacheMethodsSet, &out.ProxyCacheMethodsSet + *out = new(bool) + **out = **in + } + if in.QueryParamsBlacklist != nil { + in, out := &in.QueryParamsBlacklist, &out.QueryParamsBlacklist + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.QueryParamsWhitelist != nil { + in, out := &in.QueryParamsWhitelist, &out.QueryParamsWhitelist + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RedirectHTTPSToHTTP != nil { + in, out := &in.RedirectHTTPSToHTTP, &out.RedirectHTTPSToHTTP + *out = new(bool) + **out = **in + } + if in.RedirectHTTPToHTTPS != nil { + in, out := &in.RedirectHTTPToHTTPS, &out.RedirectHTTPToHTTPS + *out = new(bool) + **out = **in + } + if in.SecureKey != nil { + in, out := &in.SecureKey, &out.SecureKey + *out = new(string) + **out = **in + } + if in.Slice != nil { + in, out := &in.Slice, &out.Slice + *out = new(bool) + **out = **in + } + if in.StaticRequestHeaders != nil { + in, out := &in.StaticRequestHeaders, &out.StaticRequestHeaders + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] 
+ in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.StaticResponseHeaders != nil { + in, out := &in.StaticResponseHeaders, &out.StaticResponseHeaders + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionsParameters. +func (in *OptionsParameters) DeepCopy() *OptionsParameters { + if in == nil { + return nil + } + out := new(OptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OriginGroup) DeepCopyInto(out *OriginGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginGroup. +func (in *OriginGroup) DeepCopy() *OriginGroup { + if in == nil { + return nil + } + out := new(OriginGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OriginGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OriginGroupInitParameters) DeepCopyInto(out *OriginGroupInitParameters) { + *out = *in + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Origin != nil { + in, out := &in.Origin, &out.Origin + *out = make([]OriginInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.UseNext != nil { + in, out := &in.UseNext, &out.UseNext + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginGroupInitParameters. +func (in *OriginGroupInitParameters) DeepCopy() *OriginGroupInitParameters { + if in == nil { + return nil + } + out := new(OriginGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OriginGroupList) DeepCopyInto(out *OriginGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OriginGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginGroupList. +func (in *OriginGroupList) DeepCopy() *OriginGroupList { + if in == nil { + return nil + } + out := new(OriginGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *OriginGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OriginGroupObservation) DeepCopyInto(out *OriginGroupObservation) { + *out = *in + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Origin != nil { + in, out := &in.Origin, &out.Origin + *out = make([]OriginObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.UseNext != nil { + in, out := &in.UseNext, &out.UseNext + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginGroupObservation. +func (in *OriginGroupObservation) DeepCopy() *OriginGroupObservation { + if in == nil { + return nil + } + out := new(OriginGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OriginGroupParameters) DeepCopyInto(out *OriginGroupParameters) { + *out = *in + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Origin != nil { + in, out := &in.Origin, &out.Origin + *out = make([]OriginParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.UseNext != nil { + in, out := &in.UseNext, &out.UseNext + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginGroupParameters. +func (in *OriginGroupParameters) DeepCopy() *OriginGroupParameters { + if in == nil { + return nil + } + out := new(OriginGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OriginGroupSpec) DeepCopyInto(out *OriginGroupSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginGroupSpec. +func (in *OriginGroupSpec) DeepCopy() *OriginGroupSpec { + if in == nil { + return nil + } + out := new(OriginGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OriginGroupStatus) DeepCopyInto(out *OriginGroupStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginGroupStatus. +func (in *OriginGroupStatus) DeepCopy() *OriginGroupStatus { + if in == nil { + return nil + } + out := new(OriginGroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OriginInitParameters) DeepCopyInto(out *OriginInitParameters) { + *out = *in + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(bool) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginInitParameters. +func (in *OriginInitParameters) DeepCopy() *OriginInitParameters { + if in == nil { + return nil + } + out := new(OriginInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OriginObservation) DeepCopyInto(out *OriginObservation) { + *out = *in + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(bool) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.OriginGroupID != nil { + in, out := &in.OriginGroupID, &out.OriginGroupID + *out = new(float64) + **out = **in + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginObservation. 
+func (in *OriginObservation) DeepCopy() *OriginObservation { + if in == nil { + return nil + } + out := new(OriginObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OriginParameters) DeepCopyInto(out *OriginParameters) { + *out = *in + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(bool) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginParameters. +func (in *OriginParameters) DeepCopy() *OriginParameters { + if in == nil { + return nil + } + out := new(OriginParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Resource) DeepCopyInto(out *Resource) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Resource. +func (in *Resource) DeepCopy() *Resource { + if in == nil { + return nil + } + out := new(Resource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Resource) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceInitParameters) DeepCopyInto(out *ResourceInitParameters) { + *out = *in + if in.Active != nil { + in, out := &in.Active, &out.Active + *out = new(bool) + **out = **in + } + if in.Cname != nil { + in, out := &in.Cname, &out.Cname + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make([]OptionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OriginGroupID != nil { + in, out := &in.OriginGroupID, &out.OriginGroupID + *out = new(float64) + **out = **in + } + if in.OriginGroupIDRef != nil { + in, out := &in.OriginGroupIDRef, &out.OriginGroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.OriginGroupIDSelector != nil { + in, out := &in.OriginGroupIDSelector, &out.OriginGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.OriginGroupName != nil { + in, out := &in.OriginGroupName, &out.OriginGroupName + *out = new(string) + **out = **in + } + if in.OriginProtocol != nil { + in, out := &in.OriginProtocol, &out.OriginProtocol + *out = new(string) + **out = **in + } + if in.SSLCertificate != nil { + in, out := &in.SSLCertificate, &out.SSLCertificate + *out = make([]SSLCertificateInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecondaryHostnames != nil { + in, out := &in.SecondaryHostnames, &out.SecondaryHostnames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = 
**in + } + } + } + if in.UpdatedAt != nil { + in, out := &in.UpdatedAt, &out.UpdatedAt + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceInitParameters. +func (in *ResourceInitParameters) DeepCopy() *ResourceInitParameters { + if in == nil { + return nil + } + out := new(ResourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceList) DeepCopyInto(out *ResourceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Resource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceList. +func (in *ResourceList) DeepCopy() *ResourceList { + if in == nil { + return nil + } + out := new(ResourceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceObservation) DeepCopyInto(out *ResourceObservation) { + *out = *in + if in.Active != nil { + in, out := &in.Active, &out.Active + *out = new(bool) + **out = **in + } + if in.Cname != nil { + in, out := &in.Cname, &out.Cname + *out = new(string) + **out = **in + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make([]OptionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OriginGroupID != nil { + in, out := &in.OriginGroupID, &out.OriginGroupID + *out = new(float64) + **out = **in + } + if in.OriginGroupName != nil { + in, out := &in.OriginGroupName, &out.OriginGroupName + *out = new(string) + **out = **in + } + if in.OriginProtocol != nil { + in, out := &in.OriginProtocol, &out.OriginProtocol + *out = new(string) + **out = **in + } + if in.ProviderCname != nil { + in, out := &in.ProviderCname, &out.ProviderCname + *out = new(string) + **out = **in + } + if in.SSLCertificate != nil { + in, out := &in.SSLCertificate, &out.SSLCertificate + *out = make([]SSLCertificateObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecondaryHostnames != nil { + in, out := &in.SecondaryHostnames, &out.SecondaryHostnames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.UpdatedAt != nil { + in, out := &in.UpdatedAt, &out.UpdatedAt + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceObservation. 
+func (in *ResourceObservation) DeepCopy() *ResourceObservation { + if in == nil { + return nil + } + out := new(ResourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceParameters) DeepCopyInto(out *ResourceParameters) { + *out = *in + if in.Active != nil { + in, out := &in.Active, &out.Active + *out = new(bool) + **out = **in + } + if in.Cname != nil { + in, out := &in.Cname, &out.Cname + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make([]OptionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OriginGroupID != nil { + in, out := &in.OriginGroupID, &out.OriginGroupID + *out = new(float64) + **out = **in + } + if in.OriginGroupIDRef != nil { + in, out := &in.OriginGroupIDRef, &out.OriginGroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.OriginGroupIDSelector != nil { + in, out := &in.OriginGroupIDSelector, &out.OriginGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.OriginGroupName != nil { + in, out := &in.OriginGroupName, &out.OriginGroupName + *out = new(string) + **out = **in + } + if in.OriginProtocol != nil { + in, out := &in.OriginProtocol, &out.OriginProtocol + *out = new(string) + **out = **in + } + if in.SSLCertificate != nil { + in, out := &in.SSLCertificate, &out.SSLCertificate + *out = make([]SSLCertificateParameters, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecondaryHostnames != nil { + in, out := &in.SecondaryHostnames, &out.SecondaryHostnames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.UpdatedAt != nil { + in, out := &in.UpdatedAt, &out.UpdatedAt + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceParameters. +func (in *ResourceParameters) DeepCopy() *ResourceParameters { + if in == nil { + return nil + } + out := new(ResourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceSpec) DeepCopyInto(out *ResourceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSpec. +func (in *ResourceSpec) DeepCopy() *ResourceSpec { + if in == nil { + return nil + } + out := new(ResourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceStatus) DeepCopyInto(out *ResourceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceStatus. +func (in *ResourceStatus) DeepCopy() *ResourceStatus { + if in == nil { + return nil + } + out := new(ResourceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SSLCertificateInitParameters) DeepCopyInto(out *SSLCertificateInitParameters) { + *out = *in + if in.CertificateManagerID != nil { + in, out := &in.CertificateManagerID, &out.CertificateManagerID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSLCertificateInitParameters. +func (in *SSLCertificateInitParameters) DeepCopy() *SSLCertificateInitParameters { + if in == nil { + return nil + } + out := new(SSLCertificateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SSLCertificateObservation) DeepCopyInto(out *SSLCertificateObservation) { + *out = *in + if in.CertificateManagerID != nil { + in, out := &in.CertificateManagerID, &out.CertificateManagerID + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSLCertificateObservation. +func (in *SSLCertificateObservation) DeepCopy() *SSLCertificateObservation { + if in == nil { + return nil + } + out := new(SSLCertificateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SSLCertificateParameters) DeepCopyInto(out *SSLCertificateParameters) { + *out = *in + if in.CertificateManagerID != nil { + in, out := &in.CertificateManagerID, &out.CertificateManagerID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSLCertificateParameters. +func (in *SSLCertificateParameters) DeepCopy() *SSLCertificateParameters { + if in == nil { + return nil + } + out := new(SSLCertificateParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/cdn/v1alpha1/zz_generated.managed.go b/apis/cdn/v1alpha1/zz_generated.managed.go new file mode 100644 index 0000000..294db49 --- /dev/null +++ b/apis/cdn/v1alpha1/zz_generated.managed.go @@ -0,0 +1,125 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this OriginGroup. +func (mg *OriginGroup) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this OriginGroup. +func (mg *OriginGroup) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this OriginGroup. +func (mg *OriginGroup) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this OriginGroup. +func (mg *OriginGroup) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this OriginGroup. +func (mg *OriginGroup) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this OriginGroup. 
+func (mg *OriginGroup) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this OriginGroup. +func (mg *OriginGroup) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this OriginGroup. +func (mg *OriginGroup) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this OriginGroup. +func (mg *OriginGroup) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this OriginGroup. +func (mg *OriginGroup) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this OriginGroup. +func (mg *OriginGroup) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this OriginGroup. +func (mg *OriginGroup) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Resource. +func (mg *Resource) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Resource. +func (mg *Resource) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Resource. +func (mg *Resource) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Resource. +func (mg *Resource) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Resource. +func (mg *Resource) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Resource. 
+func (mg *Resource) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Resource. +func (mg *Resource) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Resource. +func (mg *Resource) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Resource. +func (mg *Resource) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Resource. +func (mg *Resource) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Resource. +func (mg *Resource) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Resource. +func (mg *Resource) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/cdn/v1alpha1/zz_generated.managedlist.go b/apis/cdn/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 0000000..00f2ce3 --- /dev/null +++ b/apis/cdn/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,23 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this OriginGroupList. +func (l *OriginGroupList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ResourceList. 
+func (l *ResourceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/cdn/v1alpha1/zz_generated.resolvers.go b/apis/cdn/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000..45e0381 --- /dev/null +++ b/apis/cdn/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,128 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this OriginGroup. +func (mg *OriginGroup) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, 
"mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Resource. +func (mg *Resource) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromFloatPtrValue(mg.Spec.ForProvider.OriginGroupID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.OriginGroupIDRef, + Selector: mg.Spec.ForProvider.OriginGroupIDSelector, + To: reference.To{ + List: &OriginGroupList{}, + Managed: &OriginGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.OriginGroupID") + } + mg.Spec.ForProvider.OriginGroupID = reference.ToFloatPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.OriginGroupIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, 
"mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromFloatPtrValue(mg.Spec.InitProvider.OriginGroupID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.OriginGroupIDRef, + Selector: mg.Spec.InitProvider.OriginGroupIDSelector, + To: reference.To{ + List: &OriginGroupList{}, + Managed: &OriginGroup{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.OriginGroupID") + } + mg.Spec.InitProvider.OriginGroupID = reference.ToFloatPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.OriginGroupIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/cdn/v1alpha1/zz_groupversion_info.go b/apis/cdn/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..eda484c --- /dev/null +++ b/apis/cdn/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,28 @@ +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=cdn.yandex-cloud.upjet.crossplane.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "cdn.yandex-cloud.upjet.crossplane.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/cdn/v1alpha1/zz_origingroup_terraformed.go b/apis/cdn/v1alpha1/zz_origingroup_terraformed.go new file mode 100755 index 0000000..8cdcff3 --- /dev/null +++ b/apis/cdn/v1alpha1/zz_origingroup_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this OriginGroup +func (mg *OriginGroup) GetTerraformResourceType() string { + return "yandex_cdn_origin_group" +} + +// GetConnectionDetailsMapping for this OriginGroup +func (tr *OriginGroup) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this OriginGroup +func (tr *OriginGroup) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this OriginGroup +func (tr *OriginGroup) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this OriginGroup +func (tr *OriginGroup) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this OriginGroup +func (tr *OriginGroup) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this OriginGroup +func (tr *OriginGroup) SetParameters(params map[string]any) error { + p, err := 
json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this OriginGroup +func (tr *OriginGroup) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this OriginGroup +func (tr *OriginGroup) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this OriginGroup using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *OriginGroup) LateInitialize(attrs []byte) (bool, error) { + params := &OriginGroupParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *OriginGroup) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/cdn/v1alpha1/zz_origingroup_types.go b/apis/cdn/v1alpha1/zz_origingroup_types.go new file mode 100755 index 0000000..fce5b51 --- /dev/null +++ b/apis/cdn/v1alpha1/zz_origingroup_types.go @@ -0,0 +1,165 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type OriginGroupInitParameters struct { + + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // CDN Origin Group name used to define device. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + Origin []OriginInitParameters `json:"origin,omitempty" tf:"origin,omitempty"` + + // If the option is active (has true value), in case the origin responds with 4XX or 5XX codes, use the next origin from the list. + UseNext *bool `json:"useNext,omitempty" tf:"use_next,omitempty"` +} + +type OriginGroupObservation struct { + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // CDN Origin Group name used to define device. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + Origin []OriginObservation `json:"origin,omitempty" tf:"origin,omitempty"` + + // If the option is active (has true value), in case the origin responds with 4XX or 5XX codes, use the next origin from the list. + UseNext *bool `json:"useNext,omitempty" tf:"use_next,omitempty"` +} + +type OriginGroupParameters struct { + + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // CDN Origin Group name used to define device. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // +kubebuilder:validation:Optional + Origin []OriginParameters `json:"origin,omitempty" tf:"origin,omitempty"` + + // If the option is active (has true value), in case the origin responds with 4XX or 5XX codes, use the next origin from the list. 
+ // +kubebuilder:validation:Optional + UseNext *bool `json:"useNext,omitempty" tf:"use_next,omitempty"` +} + +type OriginInitParameters struct { + Backup *bool `json:"backup,omitempty" tf:"backup,omitempty"` + + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + Source *string `json:"source,omitempty" tf:"source,omitempty"` +} + +type OriginObservation struct { + Backup *bool `json:"backup,omitempty" tf:"backup,omitempty"` + + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + OriginGroupID *float64 `json:"originGroupId,omitempty" tf:"origin_group_id,omitempty"` + + Source *string `json:"source,omitempty" tf:"source,omitempty"` +} + +type OriginParameters struct { + + // +kubebuilder:validation:Optional + Backup *bool `json:"backup,omitempty" tf:"backup,omitempty"` + + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // +kubebuilder:validation:Optional + Source *string `json:"source" tf:"source,omitempty"` +} + +// OriginGroupSpec defines the desired state of OriginGroup +type OriginGroupSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider OriginGroupParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider OriginGroupInitParameters `json:"initProvider,omitempty"` +} + +// OriginGroupStatus defines the observed state of OriginGroup. +type OriginGroupStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider OriginGroupObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// OriginGroup is the Schema for the OriginGroups API. Allows management of a Yandex.Cloud CDN Origin Groups. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type OriginGroup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.origin) || (has(self.initProvider) && has(self.initProvider.origin))",message="spec.forProvider.origin is a required parameter" + Spec OriginGroupSpec `json:"spec"` + Status OriginGroupStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// OriginGroupList contains a list of OriginGroups +type OriginGroupList struct { + metav1.TypeMeta 
`json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []OriginGroup `json:"items"` +} + +// Repository type metadata. +var ( + OriginGroup_Kind = "OriginGroup" + OriginGroup_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: OriginGroup_Kind}.String() + OriginGroup_KindAPIVersion = OriginGroup_Kind + "." + CRDGroupVersion.String() + OriginGroup_GroupVersionKind = CRDGroupVersion.WithKind(OriginGroup_Kind) +) + +func init() { + SchemeBuilder.Register(&OriginGroup{}, &OriginGroupList{}) +} diff --git a/apis/cdn/v1alpha1/zz_resource_terraformed.go b/apis/cdn/v1alpha1/zz_resource_terraformed.go new file mode 100755 index 0000000..77a2734 --- /dev/null +++ b/apis/cdn/v1alpha1/zz_resource_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Resource +func (mg *Resource) GetTerraformResourceType() string { + return "yandex_cdn_resource" +} + +// GetConnectionDetailsMapping for this Resource +func (tr *Resource) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Resource +func (tr *Resource) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Resource +func (tr *Resource) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Resource +func (tr *Resource) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// 
GetParameters of this Resource +func (tr *Resource) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Resource +func (tr *Resource) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Resource +func (tr *Resource) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this Resource +func (tr *Resource) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Resource using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Resource) LateInitialize(attrs []byte) (bool, error) { + params := &ResourceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Resource) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/cdn/v1alpha1/zz_resource_types.go b/apis/cdn/v1alpha1/zz_resource_types.go new file mode 100755 index 0000000..3f06ced --- /dev/null +++ b/apis/cdn/v1alpha1/zz_resource_types.go @@ -0,0 +1,528 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type IPAddressACLInitParameters struct { + + // the list of specified IP addresses to be allowed or denied depending on acl policy type. + ExceptedValues []*string `json:"exceptedValues,omitempty" tf:"excepted_values,omitempty"` + + // the policy type for ip_address_acl option, one of "allow" or "deny" values. + PolicyType *string `json:"policyType,omitempty" tf:"policy_type,omitempty"` +} + +type IPAddressACLObservation struct { + + // the list of specified IP addresses to be allowed or denied depending on acl policy type. + ExceptedValues []*string `json:"exceptedValues,omitempty" tf:"excepted_values,omitempty"` + + // the policy type for ip_address_acl option, one of "allow" or "deny" values. 
+ PolicyType *string `json:"policyType,omitempty" tf:"policy_type,omitempty"` +} + +type IPAddressACLParameters struct { + + // the list of specified IP addresses to be allowed or denied depending on acl policy type. + // +kubebuilder:validation:Optional + ExceptedValues []*string `json:"exceptedValues,omitempty" tf:"excepted_values,omitempty"` + + // the policy type for ip_address_acl option, one of "allow" or "deny" values. + // +kubebuilder:validation:Optional + PolicyType *string `json:"policyType,omitempty" tf:"policy_type,omitempty"` +} + +type OptionsInitParameters struct { + + // HTTP methods for your CDN content. By default the following methods are allowed: GET, HEAD, POST, PUT, PATCH, DELETE, OPTIONS. In case some methods are not allowed to the user, they will get the 405 (Method Not Allowed) response. If the method is not supported, the user gets the 501 (Not Implemented) response. + AllowedHTTPMethods []*string `json:"allowedHttpMethods,omitempty" tf:"allowed_http_methods,omitempty"` + + // set up a cache period for the end-users browser. Content will be cached due to origin settings. If there are no cache settings on your origin, the content will not be cached. The list of HTTP response codes that can be cached in browsers: 200, 201, 204, 206, 301, 302, 303, 304, 307, 308. Other response codes will not be cached. The default value is 4 days. + BrowserCacheSettings *float64 `json:"browserCacheSettings,omitempty" tf:"browser_cache_settings,omitempty"` + + // list HTTP headers that must be included in responses to clients. + CacheHTTPHeaders []*string `json:"cacheHttpHeaders,omitempty" tf:"cache_http_headers,omitempty"` + + // parameter that lets browsers get access to selected resources from a domain different to a domain from which the request is received. + Cors []*string `json:"cors,omitempty" tf:"cors,omitempty"` + + // custom value for the Host header. Your server must be able to process requests with the chosen header. 
+ CustomHostHeader *string `json:"customHostHeader,omitempty" tf:"custom_host_header,omitempty"` + + // wildcard additional CNAME. If a resource has a wildcard additional CNAME, you can use your own certificate for content delivery via HTTPS. Read-only. + CustomServerName *string `json:"customServerName,omitempty" tf:"custom_server_name,omitempty"` + + // setup a cache status. + DisableCache *bool `json:"disableCache,omitempty" tf:"disable_cache,omitempty"` + + // disabling proxy force ranges. + DisableProxyForceRanges *bool `json:"disableProxyForceRanges,omitempty" tf:"disable_proxy_force_ranges,omitempty"` + + // content will be cached according to origin cache settings. The value applies for a response with codes 200, 201, 204, 206, 301, 302, 303, 304, 307, 308 if an origin server does not have caching HTTP headers. Responses with other codes will not be cached. + EdgeCacheSettings *float64 `json:"edgeCacheSettings,omitempty" tf:"edge_cache_settings,omitempty"` + + // enable access limiting by IP addresses, option available only with setting secure_key. + EnableIPURLSigning *bool `json:"enableIpUrlSigning,omitempty" tf:"enable_ip_url_signing,omitempty"` + + // option helps you to reduce the bandwidth between origin and CDN servers. Also, content delivery speed becomes higher because of reducing the time for compressing files in a CDN. + FetchedCompressed *bool `json:"fetchedCompressed,omitempty" tf:"fetched_compressed,omitempty"` + + // choose the Forward Host header option if is important to send in the request to the Origin the same Host header as was sent in the request to CDN server. + ForwardHostHeader *bool `json:"forwardHostHeader,omitempty" tf:"forward_host_header,omitempty"` + + // GZip compression at CDN servers reduces file size by 70% and can be as high as 90%. 
+ GzipOn *bool `json:"gzipOn,omitempty" tf:"gzip_on,omitempty"` + + IPAddressACL []IPAddressACLInitParameters `json:"ipAddressAcl,omitempty" tf:"ip_address_acl,omitempty"` + + // set for ignoring cookie. + IgnoreCookie *bool `json:"ignoreCookie,omitempty" tf:"ignore_cookie,omitempty"` + + // files with different query parameters are cached as objects with the same key regardless of the parameter value. selected by default. + IgnoreQueryParams *bool `json:"ignoreQueryParams,omitempty" tf:"ignore_query_params,omitempty"` + + // allows caching for GET, HEAD and POST requests. + ProxyCacheMethodsSet *bool `json:"proxyCacheMethodsSet,omitempty" tf:"proxy_cache_methods_set,omitempty"` + + // files with the specified query parameters are cached as objects with the same key, files with other parameters are cached as objects with different keys. + QueryParamsBlacklist []*string `json:"queryParamsBlacklist,omitempty" tf:"query_params_blacklist,omitempty"` + + // files with the specified query parameters are cached as objects with different keys, files with other parameters are cached as objects with the same key. + QueryParamsWhitelist []*string `json:"queryParamsWhitelist,omitempty" tf:"query_params_whitelist,omitempty"` + + // set up a redirect from HTTPS to HTTP. + RedirectHTTPSToHTTP *bool `json:"redirectHttpsToHttp,omitempty" tf:"redirect_https_to_http,omitempty"` + + // set up a redirect from HTTP to HTTPS. + RedirectHTTPToHTTPS *bool `json:"redirectHttpToHttps,omitempty" tf:"redirect_http_to_https,omitempty"` + + // set secure key for url encoding to protect content and limit access by IP addresses and time limits. + SecureKey *string `json:"secureKey,omitempty" tf:"secure_key,omitempty"` + + // files larger than 10 MB will be requested and cached in parts (no larger than 10 MB each part). It reduces time to first byte. The origin must support HTTP Range requests. 
+ Slice *bool `json:"slice,omitempty" tf:"slice,omitempty"` + + // set up custom headers that CDN servers will send in requests to origins. + // +mapType=granular + StaticRequestHeaders map[string]*string `json:"staticRequestHeaders,omitempty" tf:"static_request_headers,omitempty"` + + // set up custom headers that CDN servers will send in response to clients. + // +mapType=granular + StaticResponseHeaders map[string]*string `json:"staticResponseHeaders,omitempty" tf:"static_response_headers,omitempty"` +} + +type OptionsObservation struct { + + // HTTP methods for your CDN content. By default the following methods are allowed: GET, HEAD, POST, PUT, PATCH, DELETE, OPTIONS. In case some methods are not allowed to the user, they will get the 405 (Method Not Allowed) response. If the method is not supported, the user gets the 501 (Not Implemented) response. + AllowedHTTPMethods []*string `json:"allowedHttpMethods,omitempty" tf:"allowed_http_methods,omitempty"` + + // set up a cache period for the end-users browser. Content will be cached due to origin settings. If there are no cache settings on your origin, the content will not be cached. The list of HTTP response codes that can be cached in browsers: 200, 201, 204, 206, 301, 302, 303, 304, 307, 308. Other response codes will not be cached. The default value is 4 days. + BrowserCacheSettings *float64 `json:"browserCacheSettings,omitempty" tf:"browser_cache_settings,omitempty"` + + // list HTTP headers that must be included in responses to clients. + CacheHTTPHeaders []*string `json:"cacheHttpHeaders,omitempty" tf:"cache_http_headers,omitempty"` + + // parameter that lets browsers get access to selected resources from a domain different to a domain from which the request is received. + Cors []*string `json:"cors,omitempty" tf:"cors,omitempty"` + + // custom value for the Host header. Your server must be able to process requests with the chosen header. 
+ CustomHostHeader *string `json:"customHostHeader,omitempty" tf:"custom_host_header,omitempty"` + + // wildcard additional CNAME. If a resource has a wildcard additional CNAME, you can use your own certificate for content delivery via HTTPS. Read-only. + CustomServerName *string `json:"customServerName,omitempty" tf:"custom_server_name,omitempty"` + + // setup a cache status. + DisableCache *bool `json:"disableCache,omitempty" tf:"disable_cache,omitempty"` + + // disabling proxy force ranges. + DisableProxyForceRanges *bool `json:"disableProxyForceRanges,omitempty" tf:"disable_proxy_force_ranges,omitempty"` + + // content will be cached according to origin cache settings. The value applies for a response with codes 200, 201, 204, 206, 301, 302, 303, 304, 307, 308 if an origin server does not have caching HTTP headers. Responses with other codes will not be cached. + EdgeCacheSettings *float64 `json:"edgeCacheSettings,omitempty" tf:"edge_cache_settings,omitempty"` + + // enable access limiting by IP addresses, option available only with setting secure_key. + EnableIPURLSigning *bool `json:"enableIpUrlSigning,omitempty" tf:"enable_ip_url_signing,omitempty"` + + // option helps you to reduce the bandwidth between origin and CDN servers. Also, content delivery speed becomes higher because of reducing the time for compressing files in a CDN. + FetchedCompressed *bool `json:"fetchedCompressed,omitempty" tf:"fetched_compressed,omitempty"` + + // choose the Forward Host header option if is important to send in the request to the Origin the same Host header as was sent in the request to CDN server. + ForwardHostHeader *bool `json:"forwardHostHeader,omitempty" tf:"forward_host_header,omitempty"` + + // GZip compression at CDN servers reduces file size by 70% and can be as high as 90%. 
+ GzipOn *bool `json:"gzipOn,omitempty" tf:"gzip_on,omitempty"` + + IPAddressACL []IPAddressACLObservation `json:"ipAddressAcl,omitempty" tf:"ip_address_acl,omitempty"` + + // set for ignoring cookie. + IgnoreCookie *bool `json:"ignoreCookie,omitempty" tf:"ignore_cookie,omitempty"` + + // files with different query parameters are cached as objects with the same key regardless of the parameter value. selected by default. + IgnoreQueryParams *bool `json:"ignoreQueryParams,omitempty" tf:"ignore_query_params,omitempty"` + + // allows caching for GET, HEAD and POST requests. + ProxyCacheMethodsSet *bool `json:"proxyCacheMethodsSet,omitempty" tf:"proxy_cache_methods_set,omitempty"` + + // files with the specified query parameters are cached as objects with the same key, files with other parameters are cached as objects with different keys. + QueryParamsBlacklist []*string `json:"queryParamsBlacklist,omitempty" tf:"query_params_blacklist,omitempty"` + + // files with the specified query parameters are cached as objects with different keys, files with other parameters are cached as objects with the same key. + QueryParamsWhitelist []*string `json:"queryParamsWhitelist,omitempty" tf:"query_params_whitelist,omitempty"` + + // set up a redirect from HTTPS to HTTP. + RedirectHTTPSToHTTP *bool `json:"redirectHttpsToHttp,omitempty" tf:"redirect_https_to_http,omitempty"` + + // set up a redirect from HTTP to HTTPS. + RedirectHTTPToHTTPS *bool `json:"redirectHttpToHttps,omitempty" tf:"redirect_http_to_https,omitempty"` + + // set secure key for url encoding to protect content and limit access by IP addresses and time limits. + SecureKey *string `json:"secureKey,omitempty" tf:"secure_key,omitempty"` + + // files larger than 10 MB will be requested and cached in parts (no larger than 10 MB each part). It reduces time to first byte. The origin must support HTTP Range requests. 
+ Slice *bool `json:"slice,omitempty" tf:"slice,omitempty"` + + // set up custom headers that CDN servers will send in requests to origins. + // +mapType=granular + StaticRequestHeaders map[string]*string `json:"staticRequestHeaders,omitempty" tf:"static_request_headers,omitempty"` + + // set up custom headers that CDN servers will send in response to clients. + // +mapType=granular + StaticResponseHeaders map[string]*string `json:"staticResponseHeaders,omitempty" tf:"static_response_headers,omitempty"` +} + +type OptionsParameters struct { + + // HTTP methods for your CDN content. By default the following methods are allowed: GET, HEAD, POST, PUT, PATCH, DELETE, OPTIONS. In case some methods are not allowed to the user, they will get the 405 (Method Not Allowed) response. If the method is not supported, the user gets the 501 (Not Implemented) response. + // +kubebuilder:validation:Optional + AllowedHTTPMethods []*string `json:"allowedHttpMethods,omitempty" tf:"allowed_http_methods,omitempty"` + + // set up a cache period for the end-users browser. Content will be cached due to origin settings. If there are no cache settings on your origin, the content will not be cached. The list of HTTP response codes that can be cached in browsers: 200, 201, 204, 206, 301, 302, 303, 304, 307, 308. Other response codes will not be cached. The default value is 4 days. + // +kubebuilder:validation:Optional + BrowserCacheSettings *float64 `json:"browserCacheSettings,omitempty" tf:"browser_cache_settings,omitempty"` + + // list HTTP headers that must be included in responses to clients. + // +kubebuilder:validation:Optional + CacheHTTPHeaders []*string `json:"cacheHttpHeaders,omitempty" tf:"cache_http_headers,omitempty"` + + // parameter that lets browsers get access to selected resources from a domain different to a domain from which the request is received. 
+ // +kubebuilder:validation:Optional + Cors []*string `json:"cors,omitempty" tf:"cors,omitempty"` + + // custom value for the Host header. Your server must be able to process requests with the chosen header. + // +kubebuilder:validation:Optional + CustomHostHeader *string `json:"customHostHeader,omitempty" tf:"custom_host_header,omitempty"` + + // wildcard additional CNAME. If a resource has a wildcard additional CNAME, you can use your own certificate for content delivery via HTTPS. Read-only. + // +kubebuilder:validation:Optional + CustomServerName *string `json:"customServerName,omitempty" tf:"custom_server_name,omitempty"` + + // setup a cache status. + // +kubebuilder:validation:Optional + DisableCache *bool `json:"disableCache,omitempty" tf:"disable_cache,omitempty"` + + // disabling proxy force ranges. + // +kubebuilder:validation:Optional + DisableProxyForceRanges *bool `json:"disableProxyForceRanges,omitempty" tf:"disable_proxy_force_ranges,omitempty"` + + // content will be cached according to origin cache settings. The value applies for a response with codes 200, 201, 204, 206, 301, 302, 303, 304, 307, 308 if an origin server does not have caching HTTP headers. Responses with other codes will not be cached. + // +kubebuilder:validation:Optional + EdgeCacheSettings *float64 `json:"edgeCacheSettings,omitempty" tf:"edge_cache_settings,omitempty"` + + // enable access limiting by IP addresses, option available only with setting secure_key. + // +kubebuilder:validation:Optional + EnableIPURLSigning *bool `json:"enableIpUrlSigning,omitempty" tf:"enable_ip_url_signing,omitempty"` + + // option helps you to reduce the bandwidth between origin and CDN servers. Also, content delivery speed becomes higher because of reducing the time for compressing files in a CDN. 
+ // +kubebuilder:validation:Optional + FetchedCompressed *bool `json:"fetchedCompressed,omitempty" tf:"fetched_compressed,omitempty"` + + // choose the Forward Host header option if is important to send in the request to the Origin the same Host header as was sent in the request to CDN server. + // +kubebuilder:validation:Optional + ForwardHostHeader *bool `json:"forwardHostHeader,omitempty" tf:"forward_host_header,omitempty"` + + // GZip compression at CDN servers reduces file size by 70% and can be as high as 90%. + // +kubebuilder:validation:Optional + GzipOn *bool `json:"gzipOn,omitempty" tf:"gzip_on,omitempty"` + + // +kubebuilder:validation:Optional + IPAddressACL []IPAddressACLParameters `json:"ipAddressAcl,omitempty" tf:"ip_address_acl,omitempty"` + + // set for ignoring cookie. + // +kubebuilder:validation:Optional + IgnoreCookie *bool `json:"ignoreCookie,omitempty" tf:"ignore_cookie,omitempty"` + + // files with different query parameters are cached as objects with the same key regardless of the parameter value. selected by default. + // +kubebuilder:validation:Optional + IgnoreQueryParams *bool `json:"ignoreQueryParams,omitempty" tf:"ignore_query_params,omitempty"` + + // allows caching for GET, HEAD and POST requests. + // +kubebuilder:validation:Optional + ProxyCacheMethodsSet *bool `json:"proxyCacheMethodsSet,omitempty" tf:"proxy_cache_methods_set,omitempty"` + + // files with the specified query parameters are cached as objects with the same key, files with other parameters are cached as objects with different keys. + // +kubebuilder:validation:Optional + QueryParamsBlacklist []*string `json:"queryParamsBlacklist,omitempty" tf:"query_params_blacklist,omitempty"` + + // files with the specified query parameters are cached as objects with different keys, files with other parameters are cached as objects with the same key. 
+ // +kubebuilder:validation:Optional + QueryParamsWhitelist []*string `json:"queryParamsWhitelist,omitempty" tf:"query_params_whitelist,omitempty"` + + // set up a redirect from HTTPS to HTTP. + // +kubebuilder:validation:Optional + RedirectHTTPSToHTTP *bool `json:"redirectHttpsToHttp,omitempty" tf:"redirect_https_to_http,omitempty"` + + // set up a redirect from HTTP to HTTPS. + // +kubebuilder:validation:Optional + RedirectHTTPToHTTPS *bool `json:"redirectHttpToHttps,omitempty" tf:"redirect_http_to_https,omitempty"` + + // set secure key for url encoding to protect content and limit access by IP addresses and time limits. + // +kubebuilder:validation:Optional + SecureKey *string `json:"secureKey,omitempty" tf:"secure_key,omitempty"` + + // files larger than 10 MB will be requested and cached in parts (no larger than 10 MB each part). It reduces time to first byte. The origin must support HTTP Range requests. + // +kubebuilder:validation:Optional + Slice *bool `json:"slice,omitempty" tf:"slice,omitempty"` + + // set up custom headers that CDN servers will send in requests to origins. + // +kubebuilder:validation:Optional + // +mapType=granular + StaticRequestHeaders map[string]*string `json:"staticRequestHeaders,omitempty" tf:"static_request_headers,omitempty"` + + // set up custom headers that CDN servers will send in response to clients. + // +kubebuilder:validation:Optional + // +mapType=granular + StaticResponseHeaders map[string]*string `json:"staticResponseHeaders,omitempty" tf:"static_response_headers,omitempty"` +} + +type ResourceInitParameters struct { + + // Flag to create Resource either in active or disabled state. True - the content from CDN is available to clients. + Active *bool `json:"active,omitempty" tf:"active,omitempty"` + + // CDN endpoint CNAME, must be unique among resources. 
+ Cname *string `json:"cname,omitempty" tf:"cname,omitempty"` + + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // CDN Resource settings and options to tune CDN edge behavior. + Options []OptionsInitParameters `json:"options,omitempty" tf:"options,omitempty"` + + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/cdn/v1alpha1.OriginGroup + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + OriginGroupID *float64 `json:"originGroupId,omitempty" tf:"origin_group_id,omitempty"` + + // Reference to a OriginGroup in cdn to populate originGroupId. + // +kubebuilder:validation:Optional + OriginGroupIDRef *v1.Reference `json:"originGroupIdRef,omitempty" tf:"-"` + + // Selector for a OriginGroup in cdn to populate originGroupId. + // +kubebuilder:validation:Optional + OriginGroupIDSelector *v1.Selector `json:"originGroupIdSelector,omitempty" tf:"-"` + + OriginGroupName *string `json:"originGroupName,omitempty" tf:"origin_group_name,omitempty"` + + OriginProtocol *string `json:"originProtocol,omitempty" tf:"origin_protocol,omitempty"` + + // SSL certificate of CDN resource. + SSLCertificate []SSLCertificateInitParameters `json:"sslCertificate,omitempty" tf:"ssl_certificate,omitempty"` + + // list of secondary hostname strings. 
+ // +listType=set + SecondaryHostnames []*string `json:"secondaryHostnames,omitempty" tf:"secondary_hostnames,omitempty"` + + UpdatedAt *string `json:"updatedAt,omitempty" tf:"updated_at,omitempty"` +} + +type ResourceObservation struct { + + // Flag to create Resource either in active or disabled state. True - the content from CDN is available to clients. + Active *bool `json:"active,omitempty" tf:"active,omitempty"` + + // CDN endpoint CNAME, must be unique among resources. + Cname *string `json:"cname,omitempty" tf:"cname,omitempty"` + + // Creation timestamp of the IoT Core Device + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // CDN Resource settings and options to tune CDN edge behavior. + Options []OptionsObservation `json:"options,omitempty" tf:"options,omitempty"` + + OriginGroupID *float64 `json:"originGroupId,omitempty" tf:"origin_group_id,omitempty"` + + OriginGroupName *string `json:"originGroupName,omitempty" tf:"origin_group_name,omitempty"` + + OriginProtocol *string `json:"originProtocol,omitempty" tf:"origin_protocol,omitempty"` + + // provider CNAME of CDN resource, computed value for read and update operations. + ProviderCname *string `json:"providerCname,omitempty" tf:"provider_cname,omitempty"` + + // SSL certificate of CDN resource. + SSLCertificate []SSLCertificateObservation `json:"sslCertificate,omitempty" tf:"ssl_certificate,omitempty"` + + // list of secondary hostname strings. + // +listType=set + SecondaryHostnames []*string `json:"secondaryHostnames,omitempty" tf:"secondary_hostnames,omitempty"` + + UpdatedAt *string `json:"updatedAt,omitempty" tf:"updated_at,omitempty"` +} + +type ResourceParameters struct { + + // Flag to create Resource either in active or disabled state. True - the content from CDN is available to clients. 
+ // +kubebuilder:validation:Optional + Active *bool `json:"active,omitempty" tf:"active,omitempty"` + + // CDN endpoint CNAME, must be unique among resources. + // +kubebuilder:validation:Optional + Cname *string `json:"cname,omitempty" tf:"cname,omitempty"` + + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // CDN Resource settings and options to tune CDN edge behavior. + // +kubebuilder:validation:Optional + Options []OptionsParameters `json:"options,omitempty" tf:"options,omitempty"` + + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/cdn/v1alpha1.OriginGroup + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + OriginGroupID *float64 `json:"originGroupId,omitempty" tf:"origin_group_id,omitempty"` + + // Reference to a OriginGroup in cdn to populate originGroupId. + // +kubebuilder:validation:Optional + OriginGroupIDRef *v1.Reference `json:"originGroupIdRef,omitempty" tf:"-"` + + // Selector for a OriginGroup in cdn to populate originGroupId. 
+ // +kubebuilder:validation:Optional + OriginGroupIDSelector *v1.Selector `json:"originGroupIdSelector,omitempty" tf:"-"` + + // +kubebuilder:validation:Optional + OriginGroupName *string `json:"originGroupName,omitempty" tf:"origin_group_name,omitempty"` + + // +kubebuilder:validation:Optional + OriginProtocol *string `json:"originProtocol,omitempty" tf:"origin_protocol,omitempty"` + + // SSL certificate of CDN resource. + // +kubebuilder:validation:Optional + SSLCertificate []SSLCertificateParameters `json:"sslCertificate,omitempty" tf:"ssl_certificate,omitempty"` + + // list of secondary hostname strings. + // +kubebuilder:validation:Optional + // +listType=set + SecondaryHostnames []*string `json:"secondaryHostnames,omitempty" tf:"secondary_hostnames,omitempty"` + + // +kubebuilder:validation:Optional + UpdatedAt *string `json:"updatedAt,omitempty" tf:"updated_at,omitempty"` +} + +type SSLCertificateInitParameters struct { + CertificateManagerID *string `json:"certificateManagerId,omitempty" tf:"certificate_manager_id,omitempty"` + + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type SSLCertificateObservation struct { + CertificateManagerID *string `json:"certificateManagerId,omitempty" tf:"certificate_manager_id,omitempty"` + + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type SSLCertificateParameters struct { + + // +kubebuilder:validation:Optional + CertificateManagerID *string `json:"certificateManagerId,omitempty" tf:"certificate_manager_id,omitempty"` + + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +// ResourceSpec defines the desired state of Resource +type ResourceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ResourceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ResourceInitParameters `json:"initProvider,omitempty"` +} + +// ResourceStatus defines the observed state of Resource. +type ResourceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ResourceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// Resource is the Schema for the Resources API. Allows management of a Yandex.Cloud CDN Resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type Resource struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ResourceSpec `json:"spec"` + Status ResourceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ResourceList contains a list of Resources +type ResourceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Resource `json:"items"` +} + +// Repository type metadata. +var ( + Resource_Kind = "Resource" + Resource_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Resource_Kind}.String() + Resource_KindAPIVersion = Resource_Kind + "." + CRDGroupVersion.String() + Resource_GroupVersionKind = CRDGroupVersion.WithKind(Resource_Kind) +) + +func init() { + SchemeBuilder.Register(&Resource{}, &ResourceList{}) +} diff --git a/apis/cm/v1alpha1/zz_certificate_terraformed.go b/apis/cm/v1alpha1/zz_certificate_terraformed.go new file mode 100755 index 0000000..791640b --- /dev/null +++ b/apis/cm/v1alpha1/zz_certificate_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Certificate +func (mg *Certificate) GetTerraformResourceType() string { + return "yandex_cm_certificate" +} + +// GetConnectionDetailsMapping for this Certificate +func (tr *Certificate) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"self_managed[*].private_key": "selfManaged[*].privateKeySecretRef"} +} + +// GetObservation of this Certificate +func (tr *Certificate) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Certificate +func (tr *Certificate) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Certificate +func (tr *Certificate) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Certificate +func (tr *Certificate) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Certificate +func (tr *Certificate) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Certificate +func (tr *Certificate) GetInitParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Certificate +func (tr *Certificate) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Certificate using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Certificate) LateInitialize(attrs []byte) (bool, error) { + params := &CertificateParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Certificate) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/cm/v1alpha1/zz_certificate_types.go b/apis/cm/v1alpha1/zz_certificate_types.go new file mode 100755 index 0000000..14d27f9 --- /dev/null +++ b/apis/cm/v1alpha1/zz_certificate_types.go @@ -0,0 +1,353 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CertificateInitParameters struct { + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // Certificate description. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Domains for this certificate. Should be specified for managed certificates. + Domains []*string `json:"domains,omitempty" tf:"domains,omitempty"` + + // Folder that the resource belongs to. If value is omitted, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // Labels to assign to this certificate. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Managed specification. Structure is documented below. 
+ Managed []ManagedInitParameters `json:"managed,omitempty" tf:"managed,omitempty"` + + // Certificate name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Self-managed specification. Structure is documented below. + SelfManaged []SelfManagedInitParameters `json:"selfManaged,omitempty" tf:"self_managed,omitempty"` +} + +type CertificateObservation struct { + + // Array of challenges. Structure is documented below. + Challenges []ChallengesObservation `json:"challenges,omitempty" tf:"challenges,omitempty"` + + // Certificate create timestamp. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // Certificate description. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Domains for this certificate. Should be specified for managed certificates. + Domains []*string `json:"domains,omitempty" tf:"domains,omitempty"` + + // Folder that the resource belongs to. If value is omitted, the default provider folder is used. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Certificate Id. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Certificate issue timestamp. + IssuedAt *string `json:"issuedAt,omitempty" tf:"issued_at,omitempty"` + + // Certificate issuer. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // Labels to assign to this certificate. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Managed specification. Structure is documented below. + Managed []ManagedObservation `json:"managed,omitempty" tf:"managed,omitempty"` + + // Certificate name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Certificate end valid period. + NotAfter *string `json:"notAfter,omitempty" tf:"not_after,omitempty"` + + // Certificate start valid period. 
+ NotBefore *string `json:"notBefore,omitempty" tf:"not_before,omitempty"` + + // Self-managed specification. Structure is documented below. + SelfManaged []SelfManagedObservation `json:"selfManaged,omitempty" tf:"self_managed,omitempty"` + + // Certificate serial number. + Serial *string `json:"serial,omitempty" tf:"serial,omitempty"` + + // Certificate status: "VALIDATING", "INVALID", "ISSUED", "REVOKED", "RENEWING" or "RENEWAL_FAILED". + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Certificate subject. + Subject *string `json:"subject,omitempty" tf:"subject,omitempty"` + + // Certificate type: "MANAGED" or "IMPORTED". + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Certificate update timestamp. + UpdatedAt *string `json:"updatedAt,omitempty" tf:"updated_at,omitempty"` +} + +type CertificateParameters struct { + + // +kubebuilder:validation:Optional + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // Certificate description. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Domains for this certificate. Should be specified for managed certificates. + // +kubebuilder:validation:Optional + Domains []*string `json:"domains,omitempty" tf:"domains,omitempty"` + + // Folder that the resource belongs to. If value is omitted, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. 
+ // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // Labels to assign to this certificate. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Managed specification. Structure is documented below. + // +kubebuilder:validation:Optional + Managed []ManagedParameters `json:"managed,omitempty" tf:"managed,omitempty"` + + // Certificate name. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Self-managed specification. Structure is documented below. + // +kubebuilder:validation:Optional + SelfManaged []SelfManagedParameters `json:"selfManaged,omitempty" tf:"self_managed,omitempty"` +} + +type ChallengesInitParameters struct { +} + +type ChallengesObservation struct { + + // Time the challenge was created. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // DNS record name (only for DNS challenge). + DNSName *string `json:"dnsName,omitempty" tf:"dns_name,omitempty"` + + // DNS record type: "TXT" or "CNAME" (only for DNS challenge). + DNSType *string `json:"dnsType,omitempty" tf:"dns_type,omitempty"` + + // DNS record value (only for DNS challenge). + DNSValue *string `json:"dnsValue,omitempty" tf:"dns_value,omitempty"` + + // Validated domain. + Domain *string `json:"domain,omitempty" tf:"domain,omitempty"` + + // The content that should be made accessible with the given http_url (only for HTTP challenge). + HTTPContent *string `json:"httpContent,omitempty" tf:"http_content,omitempty"` + + // URL where the challenge content http_content should be placed (only for HTTP challenge). + HTTPURL *string `json:"httpUrl,omitempty" tf:"http_url,omitempty"` + + // Current status message. + Message *string `json:"message,omitempty" tf:"message,omitempty"` + + // Challenge type "DNS" or "HTTP". 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Last time the challenge was updated. + UpdatedAt *string `json:"updatedAt,omitempty" tf:"updated_at,omitempty"` +} + +type ChallengesParameters struct { +} + +type ManagedInitParameters struct { + + // . Expected number of challenge count needed to validate certificate. + // Resource creation will fail if the specified value does not match the actual number of challenges received from issue provider. + // This argument is helpful for safe automatic resource creation for passing challenges for multi-domain certificates. + ChallengeCount *float64 `json:"challengeCount,omitempty" tf:"challenge_count,omitempty"` + + // Domain owner-check method. Possible values: + ChallengeType *string `json:"challengeType,omitempty" tf:"challenge_type,omitempty"` +} + +type ManagedObservation struct { + + // . Expected number of challenge count needed to validate certificate. + // Resource creation will fail if the specified value does not match the actual number of challenges received from issue provider. + // This argument is helpful for safe automatic resource creation for passing challenges for multi-domain certificates. + ChallengeCount *float64 `json:"challengeCount,omitempty" tf:"challenge_count,omitempty"` + + // Domain owner-check method. Possible values: + ChallengeType *string `json:"challengeType,omitempty" tf:"challenge_type,omitempty"` +} + +type ManagedParameters struct { + + // . Expected number of challenge count needed to validate certificate. + // Resource creation will fail if the specified value does not match the actual number of challenges received from issue provider. + // This argument is helpful for safe automatic resource creation for passing challenges for multi-domain certificates. + // +kubebuilder:validation:Optional + ChallengeCount *float64 `json:"challengeCount,omitempty" tf:"challenge_count,omitempty"` + + // Domain owner-check method. 
Possible values: + // +kubebuilder:validation:Optional + ChallengeType *string `json:"challengeType" tf:"challenge_type,omitempty"` +} + +type PrivateKeyLockboxSecretInitParameters struct { + + // Lockbox secret Id. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Key of the Lockbox secret, the value of which contains the private key of the certificate. + Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type PrivateKeyLockboxSecretObservation struct { + + // Lockbox secret Id. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Key of the Lockbox secret, the value of which contains the private key of the certificate. + Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type PrivateKeyLockboxSecretParameters struct { + + // Lockbox secret Id. + // +kubebuilder:validation:Optional + ID *string `json:"id" tf:"id,omitempty"` + + // Key of the Lockbox secret, the value of which contains the private key of the certificate. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` +} + +type SelfManagedInitParameters struct { + + // Certificate with chain. + Certificate *string `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // Lockbox secret specification for getting private key. Structure is documented below. + PrivateKeyLockboxSecret []PrivateKeyLockboxSecretInitParameters `json:"privateKeyLockboxSecret,omitempty" tf:"private_key_lockbox_secret,omitempty"` + + // Private key of certificate. + PrivateKeySecretRef *v1.SecretKeySelector `json:"privateKeySecretRef,omitempty" tf:"-"` +} + +type SelfManagedObservation struct { + + // Certificate with chain. + Certificate *string `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // Lockbox secret specification for getting private key. Structure is documented below. 
+ PrivateKeyLockboxSecret []PrivateKeyLockboxSecretObservation `json:"privateKeyLockboxSecret,omitempty" tf:"private_key_lockbox_secret,omitempty"` +} + +type SelfManagedParameters struct { + + // Certificate with chain. + // +kubebuilder:validation:Optional + Certificate *string `json:"certificate" tf:"certificate,omitempty"` + + // Lockbox secret specification for getting private key. Structure is documented below. + // +kubebuilder:validation:Optional + PrivateKeyLockboxSecret []PrivateKeyLockboxSecretParameters `json:"privateKeyLockboxSecret,omitempty" tf:"private_key_lockbox_secret,omitempty"` + + // Private key of certificate. + // +kubebuilder:validation:Optional + PrivateKeySecretRef *v1.SecretKeySelector `json:"privateKeySecretRef,omitempty" tf:"-"` +} + +// CertificateSpec defines the desired state of Certificate +type CertificateSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider CertificateParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider CertificateInitParameters `json:"initProvider,omitempty"` +} + +// CertificateStatus defines the observed state of Certificate. 
+type CertificateStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider CertificateObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// Certificate is the Schema for the Certificates API. A TLS certificate signed by a certification authority confirming that it belongs to the owner of the domain name. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type Certificate struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec CertificateSpec `json:"spec"` + Status CertificateStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// CertificateList contains a list of Certificates +type CertificateList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Certificate `json:"items"` +} + +// Repository type metadata. +var ( + Certificate_Kind = "Certificate" + Certificate_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Certificate_Kind}.String() + Certificate_KindAPIVersion = Certificate_Kind + "." 
+ CRDGroupVersion.String() + Certificate_GroupVersionKind = CRDGroupVersion.WithKind(Certificate_Kind) +) + +func init() { + SchemeBuilder.Register(&Certificate{}, &CertificateList{}) +} diff --git a/apis/cm/v1alpha1/zz_generated.conversion_hubs.go b/apis/cm/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..8006de9 --- /dev/null +++ b/apis/cm/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,6 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +// Hub marks this type as a conversion hub. +func (tr *Certificate) Hub() {} diff --git a/apis/cm/v1alpha1/zz_generated.deepcopy.go b/apis/cm/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..b05de38 --- /dev/null +++ b/apis/cm/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,750 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Certificate) DeepCopyInto(out *Certificate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Certificate. +func (in *Certificate) DeepCopy() *Certificate { + if in == nil { + return nil + } + out := new(Certificate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Certificate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CertificateInitParameters) DeepCopyInto(out *CertificateInitParameters) { + *out = *in + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Domains != nil { + in, out := &in.Domains, &out.Domains + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Managed != nil { + in, out := &in.Managed, &out.Managed + *out = make([]ManagedInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SelfManaged != nil { + in, out := &in.SelfManaged, &out.SelfManaged + *out = make([]SelfManagedInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateInitParameters. 
+func (in *CertificateInitParameters) DeepCopy() *CertificateInitParameters { + if in == nil { + return nil + } + out := new(CertificateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateList) DeepCopyInto(out *CertificateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Certificate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateList. +func (in *CertificateList) DeepCopy() *CertificateList { + if in == nil { + return nil + } + out := new(CertificateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CertificateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CertificateObservation) DeepCopyInto(out *CertificateObservation) { + *out = *in + if in.Challenges != nil { + in, out := &in.Challenges, &out.Challenges + *out = make([]ChallengesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Domains != nil { + in, out := &in.Domains, &out.Domains + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IssuedAt != nil { + in, out := &in.IssuedAt, &out.IssuedAt + *out = new(string) + **out = **in + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Managed != nil { + in, out := &in.Managed, &out.Managed + *out = make([]ManagedObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NotAfter != nil { + in, out := &in.NotAfter, &out.NotAfter + *out = new(string) + **out = **in + } + if in.NotBefore 
!= nil { + in, out := &in.NotBefore, &out.NotBefore + *out = new(string) + **out = **in + } + if in.SelfManaged != nil { + in, out := &in.SelfManaged, &out.SelfManaged + *out = make([]SelfManagedObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Serial != nil { + in, out := &in.Serial, &out.Serial + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.UpdatedAt != nil { + in, out := &in.UpdatedAt, &out.UpdatedAt + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateObservation. +func (in *CertificateObservation) DeepCopy() *CertificateObservation { + if in == nil { + return nil + } + out := new(CertificateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CertificateParameters) DeepCopyInto(out *CertificateParameters) { + *out = *in + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Domains != nil { + in, out := &in.Domains, &out.Domains + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Managed != nil { + in, out := &in.Managed, &out.Managed + *out = make([]ManagedParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SelfManaged != nil { + in, out := &in.SelfManaged, &out.SelfManaged + *out = make([]SelfManagedParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateParameters. 
+func (in *CertificateParameters) DeepCopy() *CertificateParameters { + if in == nil { + return nil + } + out := new(CertificateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateSpec) DeepCopyInto(out *CertificateSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSpec. +func (in *CertificateSpec) DeepCopy() *CertificateSpec { + if in == nil { + return nil + } + out := new(CertificateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateStatus) DeepCopyInto(out *CertificateStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateStatus. +func (in *CertificateStatus) DeepCopy() *CertificateStatus { + if in == nil { + return nil + } + out := new(CertificateStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ChallengesInitParameters) DeepCopyInto(out *ChallengesInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChallengesInitParameters. +func (in *ChallengesInitParameters) DeepCopy() *ChallengesInitParameters { + if in == nil { + return nil + } + out := new(ChallengesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ChallengesObservation) DeepCopyInto(out *ChallengesObservation) { + *out = *in + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.DNSName != nil { + in, out := &in.DNSName, &out.DNSName + *out = new(string) + **out = **in + } + if in.DNSType != nil { + in, out := &in.DNSType, &out.DNSType + *out = new(string) + **out = **in + } + if in.DNSValue != nil { + in, out := &in.DNSValue, &out.DNSValue + *out = new(string) + **out = **in + } + if in.Domain != nil { + in, out := &in.Domain, &out.Domain + *out = new(string) + **out = **in + } + if in.HTTPContent != nil { + in, out := &in.HTTPContent, &out.HTTPContent + *out = new(string) + **out = **in + } + if in.HTTPURL != nil { + in, out := &in.HTTPURL, &out.HTTPURL + *out = new(string) + **out = **in + } + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.UpdatedAt != nil { + in, out := &in.UpdatedAt, &out.UpdatedAt + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChallengesObservation. +func (in *ChallengesObservation) DeepCopy() *ChallengesObservation { + if in == nil { + return nil + } + out := new(ChallengesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ChallengesParameters) DeepCopyInto(out *ChallengesParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChallengesParameters. 
+func (in *ChallengesParameters) DeepCopy() *ChallengesParameters { + if in == nil { + return nil + } + out := new(ChallengesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedInitParameters) DeepCopyInto(out *ManagedInitParameters) { + *out = *in + if in.ChallengeCount != nil { + in, out := &in.ChallengeCount, &out.ChallengeCount + *out = new(float64) + **out = **in + } + if in.ChallengeType != nil { + in, out := &in.ChallengeType, &out.ChallengeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedInitParameters. +func (in *ManagedInitParameters) DeepCopy() *ManagedInitParameters { + if in == nil { + return nil + } + out := new(ManagedInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedObservation) DeepCopyInto(out *ManagedObservation) { + *out = *in + if in.ChallengeCount != nil { + in, out := &in.ChallengeCount, &out.ChallengeCount + *out = new(float64) + **out = **in + } + if in.ChallengeType != nil { + in, out := &in.ChallengeType, &out.ChallengeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedObservation. +func (in *ManagedObservation) DeepCopy() *ManagedObservation { + if in == nil { + return nil + } + out := new(ManagedObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagedParameters) DeepCopyInto(out *ManagedParameters) { + *out = *in + if in.ChallengeCount != nil { + in, out := &in.ChallengeCount, &out.ChallengeCount + *out = new(float64) + **out = **in + } + if in.ChallengeType != nil { + in, out := &in.ChallengeType, &out.ChallengeType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedParameters. +func (in *ManagedParameters) DeepCopy() *ManagedParameters { + if in == nil { + return nil + } + out := new(ManagedParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateKeyLockboxSecretInitParameters) DeepCopyInto(out *PrivateKeyLockboxSecretInitParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateKeyLockboxSecretInitParameters. +func (in *PrivateKeyLockboxSecretInitParameters) DeepCopy() *PrivateKeyLockboxSecretInitParameters { + if in == nil { + return nil + } + out := new(PrivateKeyLockboxSecretInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateKeyLockboxSecretObservation) DeepCopyInto(out *PrivateKeyLockboxSecretObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateKeyLockboxSecretObservation. 
+func (in *PrivateKeyLockboxSecretObservation) DeepCopy() *PrivateKeyLockboxSecretObservation { + if in == nil { + return nil + } + out := new(PrivateKeyLockboxSecretObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateKeyLockboxSecretParameters) DeepCopyInto(out *PrivateKeyLockboxSecretParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateKeyLockboxSecretParameters. +func (in *PrivateKeyLockboxSecretParameters) DeepCopy() *PrivateKeyLockboxSecretParameters { + if in == nil { + return nil + } + out := new(PrivateKeyLockboxSecretParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfManagedInitParameters) DeepCopyInto(out *SelfManagedInitParameters) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = new(string) + **out = **in + } + if in.PrivateKeyLockboxSecret != nil { + in, out := &in.PrivateKeyLockboxSecret, &out.PrivateKeyLockboxSecret + *out = make([]PrivateKeyLockboxSecretInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PrivateKeySecretRef != nil { + in, out := &in.PrivateKeySecretRef, &out.PrivateKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfManagedInitParameters. 
+func (in *SelfManagedInitParameters) DeepCopy() *SelfManagedInitParameters { + if in == nil { + return nil + } + out := new(SelfManagedInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfManagedObservation) DeepCopyInto(out *SelfManagedObservation) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = new(string) + **out = **in + } + if in.PrivateKeyLockboxSecret != nil { + in, out := &in.PrivateKeyLockboxSecret, &out.PrivateKeyLockboxSecret + *out = make([]PrivateKeyLockboxSecretObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfManagedObservation. +func (in *SelfManagedObservation) DeepCopy() *SelfManagedObservation { + if in == nil { + return nil + } + out := new(SelfManagedObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfManagedParameters) DeepCopyInto(out *SelfManagedParameters) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = new(string) + **out = **in + } + if in.PrivateKeyLockboxSecret != nil { + in, out := &in.PrivateKeyLockboxSecret, &out.PrivateKeyLockboxSecret + *out = make([]PrivateKeyLockboxSecretParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PrivateKeySecretRef != nil { + in, out := &in.PrivateKeySecretRef, &out.PrivateKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfManagedParameters. 
+func (in *SelfManagedParameters) DeepCopy() *SelfManagedParameters { + if in == nil { + return nil + } + out := new(SelfManagedParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/cm/v1alpha1/zz_generated.managed.go b/apis/cm/v1alpha1/zz_generated.managed.go new file mode 100644 index 0000000..33f92cd --- /dev/null +++ b/apis/cm/v1alpha1/zz_generated.managed.go @@ -0,0 +1,65 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Certificate. +func (mg *Certificate) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Certificate. +func (mg *Certificate) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Certificate. +func (mg *Certificate) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Certificate. +func (mg *Certificate) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Certificate. +func (mg *Certificate) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Certificate. +func (mg *Certificate) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Certificate. +func (mg *Certificate) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Certificate. +func (mg *Certificate) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Certificate. 
+func (mg *Certificate) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Certificate. +func (mg *Certificate) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Certificate. +func (mg *Certificate) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Certificate. +func (mg *Certificate) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/cm/v1alpha1/zz_generated.managedlist.go b/apis/cm/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 0000000..f0b50f7 --- /dev/null +++ b/apis/cm/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,14 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this CertificateList. +func (l *CertificateList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/cm/v1alpha1/zz_generated.resolvers.go b/apis/cm/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000..5dcddb6 --- /dev/null +++ b/apis/cm/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,53 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this Certificate. 
+func (mg *Certificate) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/cm/v1alpha1/zz_groupversion_info.go b/apis/cm/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..bd1932e --- /dev/null +++ b/apis/cm/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,28 @@ +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=cm.yandex-cloud.upjet.crossplane.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "cm.yandex-cloud.upjet.crossplane.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/compute/v1alpha1/zz_diskiambinding_terraformed.go b/apis/compute/v1alpha1/zz_diskiambinding_terraformed.go new file mode 100755 index 0000000..bf9f07d --- /dev/null +++ b/apis/compute/v1alpha1/zz_diskiambinding_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this DiskIAMBinding +func (mg *DiskIAMBinding) GetTerraformResourceType() string { + return "yandex_compute_disk_iam_binding" +} + +// GetConnectionDetailsMapping for this DiskIAMBinding +func (tr *DiskIAMBinding) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this DiskIAMBinding +func (tr *DiskIAMBinding) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this DiskIAMBinding +func (tr *DiskIAMBinding) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this DiskIAMBinding +func (tr *DiskIAMBinding) GetID() 
string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this DiskIAMBinding +func (tr *DiskIAMBinding) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this DiskIAMBinding +func (tr *DiskIAMBinding) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this DiskIAMBinding +func (tr *DiskIAMBinding) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this DiskIAMBinding +func (tr *DiskIAMBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this DiskIAMBinding using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *DiskIAMBinding) LateInitialize(attrs []byte) (bool, error) { + params := &DiskIAMBindingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *DiskIAMBinding) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/compute/v1alpha1/zz_diskiambinding_types.go b/apis/compute/v1alpha1/zz_diskiambinding_types.go new file mode 100755 index 0000000..0999949 --- /dev/null +++ b/apis/compute/v1alpha1/zz_diskiambinding_types.go @@ -0,0 +1,124 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DiskIAMBindingInitParameters struct { + + // ID of the disk to attach the policy to. + DiskID *string `json:"diskId,omitempty" tf:"disk_id,omitempty"` + + // An array of identities that will be granted the privilege in the role. + // Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. 
Only one + // yandex_compute_disk_iam_binding can be used per role. + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +type DiskIAMBindingObservation struct { + + // ID of the disk to attach the policy to. + DiskID *string `json:"diskId,omitempty" tf:"disk_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An array of identities that will be granted the privilege in the role. + // Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one + // yandex_compute_disk_iam_binding can be used per role. + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +type DiskIAMBindingParameters struct { + + // ID of the disk to attach the policy to. + // +kubebuilder:validation:Optional + DiskID *string `json:"diskId,omitempty" tf:"disk_id,omitempty"` + + // An array of identities that will be granted the privilege in the role. + // Each entry can have one of the following values: + // +kubebuilder:validation:Optional + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one + // yandex_compute_disk_iam_binding can be used per role. + // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +// DiskIAMBindingSpec defines the desired state of DiskIAMBinding +type DiskIAMBindingSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DiskIAMBindingParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. 
+ // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DiskIAMBindingInitParameters `json:"initProvider,omitempty"` +} + +// DiskIAMBindingStatus defines the observed state of DiskIAMBinding. +type DiskIAMBindingStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DiskIAMBindingObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// DiskIAMBinding is the Schema for the DiskIAMBindings API. Allows management of a single IAM binding for a Disk. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type DiskIAMBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.diskId) || (has(self.initProvider) && has(self.initProvider.diskId))",message="spec.forProvider.diskId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.members) || 
(has(self.initProvider) && has(self.initProvider.members))",message="spec.forProvider.members is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" + Spec DiskIAMBindingSpec `json:"spec"` + Status DiskIAMBindingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DiskIAMBindingList contains a list of DiskIAMBindings +type DiskIAMBindingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DiskIAMBinding `json:"items"` +} + +// Repository type metadata. +var ( + DiskIAMBinding_Kind = "DiskIAMBinding" + DiskIAMBinding_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: DiskIAMBinding_Kind}.String() + DiskIAMBinding_KindAPIVersion = DiskIAMBinding_Kind + "." + CRDGroupVersion.String() + DiskIAMBinding_GroupVersionKind = CRDGroupVersion.WithKind(DiskIAMBinding_Kind) +) + +func init() { + SchemeBuilder.Register(&DiskIAMBinding{}, &DiskIAMBindingList{}) +} diff --git a/apis/compute/v1alpha1/zz_diskplacementgroupiambinding_terraformed.go b/apis/compute/v1alpha1/zz_diskplacementgroupiambinding_terraformed.go new file mode 100755 index 0000000..395748b --- /dev/null +++ b/apis/compute/v1alpha1/zz_diskplacementgroupiambinding_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this DiskPlacementGroupIAMBinding +func (mg *DiskPlacementGroupIAMBinding) GetTerraformResourceType() string { + return "yandex_compute_disk_placement_group_iam_binding" +} + +// GetConnectionDetailsMapping for this DiskPlacementGroupIAMBinding +func (tr *DiskPlacementGroupIAMBinding) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this DiskPlacementGroupIAMBinding +func (tr *DiskPlacementGroupIAMBinding) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this DiskPlacementGroupIAMBinding +func (tr *DiskPlacementGroupIAMBinding) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this DiskPlacementGroupIAMBinding +func (tr *DiskPlacementGroupIAMBinding) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this DiskPlacementGroupIAMBinding +func (tr *DiskPlacementGroupIAMBinding) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this DiskPlacementGroupIAMBinding +func (tr *DiskPlacementGroupIAMBinding) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return 
json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this DiskPlacementGroupIAMBinding +func (tr *DiskPlacementGroupIAMBinding) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this DiskPlacementGroupIAMBinding +func (tr *DiskPlacementGroupIAMBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this DiskPlacementGroupIAMBinding using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *DiskPlacementGroupIAMBinding) LateInitialize(attrs []byte) (bool, error) { + params := &DiskPlacementGroupIAMBindingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *DiskPlacementGroupIAMBinding) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/compute/v1alpha1/zz_diskplacementgroupiambinding_types.go b/apis/compute/v1alpha1/zz_diskplacementgroupiambinding_types.go new file mode 100755 index 0000000..796ce8a --- /dev/null +++ b/apis/compute/v1alpha1/zz_diskplacementgroupiambinding_types.go @@ -0,0 +1,124 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DiskPlacementGroupIAMBindingInitParameters struct { + + // ID of the disk placement group to attach the policy to. + DiskPlacementGroupID *string `json:"diskPlacementGroupId,omitempty" tf:"disk_placement_group_id,omitempty"` + + // An array of identities that will be granted the privilege in the role. + // Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one + // yandex_compute_disk_placement_group_iam_binding can be used per role. + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +type DiskPlacementGroupIAMBindingObservation struct { + + // ID of the disk placement group to attach the policy to. 
+ DiskPlacementGroupID *string `json:"diskPlacementGroupId,omitempty" tf:"disk_placement_group_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An array of identities that will be granted the privilege in the role. + // Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one + // yandex_compute_disk_placement_group_iam_binding can be used per role. + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +type DiskPlacementGroupIAMBindingParameters struct { + + // ID of the disk placement group to attach the policy to. + // +kubebuilder:validation:Optional + DiskPlacementGroupID *string `json:"diskPlacementGroupId,omitempty" tf:"disk_placement_group_id,omitempty"` + + // An array of identities that will be granted the privilege in the role. + // Each entry can have one of the following values: + // +kubebuilder:validation:Optional + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one + // yandex_compute_disk_placement_group_iam_binding can be used per role. + // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +// DiskPlacementGroupIAMBindingSpec defines the desired state of DiskPlacementGroupIAMBinding +type DiskPlacementGroupIAMBindingSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DiskPlacementGroupIAMBindingParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. 
+ // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DiskPlacementGroupIAMBindingInitParameters `json:"initProvider,omitempty"` +} + +// DiskPlacementGroupIAMBindingStatus defines the observed state of DiskPlacementGroupIAMBinding. +type DiskPlacementGroupIAMBindingStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DiskPlacementGroupIAMBindingObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// DiskPlacementGroupIAMBinding is the Schema for the DiskPlacementGroupIAMBindings API. Allows management of a single IAM binding for a Disk Placement Group. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type DiskPlacementGroupIAMBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.diskPlacementGroupId) || (has(self.initProvider) && has(self.initProvider.diskPlacementGroupId))",message="spec.forProvider.diskPlacementGroupId is a required parameter" + // 
+kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.members) || (has(self.initProvider) && has(self.initProvider.members))",message="spec.forProvider.members is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" + Spec DiskPlacementGroupIAMBindingSpec `json:"spec"` + Status DiskPlacementGroupIAMBindingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DiskPlacementGroupIAMBindingList contains a list of DiskPlacementGroupIAMBindings +type DiskPlacementGroupIAMBindingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DiskPlacementGroupIAMBinding `json:"items"` +} + +// Repository type metadata. +var ( + DiskPlacementGroupIAMBinding_Kind = "DiskPlacementGroupIAMBinding" + DiskPlacementGroupIAMBinding_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: DiskPlacementGroupIAMBinding_Kind}.String() + DiskPlacementGroupIAMBinding_KindAPIVersion = DiskPlacementGroupIAMBinding_Kind + "." + CRDGroupVersion.String() + DiskPlacementGroupIAMBinding_GroupVersionKind = CRDGroupVersion.WithKind(DiskPlacementGroupIAMBinding_Kind) +) + +func init() { + SchemeBuilder.Register(&DiskPlacementGroupIAMBinding{}, &DiskPlacementGroupIAMBindingList{}) +} diff --git a/apis/compute/v1alpha1/zz_filesystemiambinding_terraformed.go b/apis/compute/v1alpha1/zz_filesystemiambinding_terraformed.go new file mode 100755 index 0000000..afc7bb9 --- /dev/null +++ b/apis/compute/v1alpha1/zz_filesystemiambinding_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this FilesystemIAMBinding +func (mg *FilesystemIAMBinding) GetTerraformResourceType() string { + return "yandex_compute_filesystem_iam_binding" +} + +// GetConnectionDetailsMapping for this FilesystemIAMBinding +func (tr *FilesystemIAMBinding) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this FilesystemIAMBinding +func (tr *FilesystemIAMBinding) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this FilesystemIAMBinding +func (tr *FilesystemIAMBinding) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this FilesystemIAMBinding +func (tr *FilesystemIAMBinding) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this FilesystemIAMBinding +func (tr *FilesystemIAMBinding) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this FilesystemIAMBinding +func (tr *FilesystemIAMBinding) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this FilesystemIAMBinding +func (tr *FilesystemIAMBinding) 
GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this FilesystemIAMBinding +func (tr *FilesystemIAMBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this FilesystemIAMBinding using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *FilesystemIAMBinding) LateInitialize(attrs []byte) (bool, error) { + params := &FilesystemIAMBindingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *FilesystemIAMBinding) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/compute/v1alpha1/zz_filesystemiambinding_types.go b/apis/compute/v1alpha1/zz_filesystemiambinding_types.go new file mode 100755 index 0000000..d586eaf --- /dev/null +++ b/apis/compute/v1alpha1/zz_filesystemiambinding_types.go @@ -0,0 +1,124 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type FilesystemIAMBindingInitParameters struct { + + // ID of the filesystem to attach the policy to. + FilesystemID *string `json:"filesystemId,omitempty" tf:"filesystem_id,omitempty"` + + // An array of identities that will be granted the privilege in the role. + // Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one + // yandex_compute_filesystem_iam_binding can be used per role. + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +type FilesystemIAMBindingObservation struct { + + // ID of the filesystem to attach the policy to. + FilesystemID *string `json:"filesystemId,omitempty" tf:"filesystem_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An array of identities that will be granted the privilege in the role. + // Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one + // yandex_compute_filesystem_iam_binding can be used per role. 
+ Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +type FilesystemIAMBindingParameters struct { + + // ID of the filesystem to attach the policy to. + // +kubebuilder:validation:Optional + FilesystemID *string `json:"filesystemId,omitempty" tf:"filesystem_id,omitempty"` + + // An array of identities that will be granted the privilege in the role. + // Each entry can have one of the following values: + // +kubebuilder:validation:Optional + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one + // yandex_compute_filesystem_iam_binding can be used per role. + // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +// FilesystemIAMBindingSpec defines the desired state of FilesystemIAMBinding +type FilesystemIAMBindingSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FilesystemIAMBindingParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider FilesystemIAMBindingInitParameters `json:"initProvider,omitempty"` +} + +// FilesystemIAMBindingStatus defines the observed state of FilesystemIAMBinding. 
+type FilesystemIAMBindingStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FilesystemIAMBindingObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// FilesystemIAMBinding is the Schema for the FilesystemIAMBindings API. Allows management of a single IAM binding for a Filesystem. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type FilesystemIAMBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.filesystemId) || (has(self.initProvider) && has(self.initProvider.filesystemId))",message="spec.forProvider.filesystemId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.members) || (has(self.initProvider) && has(self.initProvider.members))",message="spec.forProvider.members is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" + 
Spec FilesystemIAMBindingSpec `json:"spec"` + Status FilesystemIAMBindingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FilesystemIAMBindingList contains a list of FilesystemIAMBindings +type FilesystemIAMBindingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []FilesystemIAMBinding `json:"items"` +} + +// Repository type metadata. +var ( + FilesystemIAMBinding_Kind = "FilesystemIAMBinding" + FilesystemIAMBinding_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: FilesystemIAMBinding_Kind}.String() + FilesystemIAMBinding_KindAPIVersion = FilesystemIAMBinding_Kind + "." + CRDGroupVersion.String() + FilesystemIAMBinding_GroupVersionKind = CRDGroupVersion.WithKind(FilesystemIAMBinding_Kind) +) + +func init() { + SchemeBuilder.Register(&FilesystemIAMBinding{}, &FilesystemIAMBindingList{}) +} diff --git a/apis/compute/v1alpha1/zz_generated.conversion_hubs.go b/apis/compute/v1alpha1/zz_generated.conversion_hubs.go index fb1fab8..5f1200c 100755 --- a/apis/compute/v1alpha1/zz_generated.conversion_hubs.go +++ b/apis/compute/v1alpha1/zz_generated.conversion_hubs.go @@ -5,29 +5,56 @@ package v1alpha1 // Hub marks this type as a conversion hub. func (tr *Disk) Hub() {} +// Hub marks this type as a conversion hub. +func (tr *DiskIAMBinding) Hub() {} + // Hub marks this type as a conversion hub. func (tr *DiskPlacementGroup) Hub() {} +// Hub marks this type as a conversion hub. +func (tr *DiskPlacementGroupIAMBinding) Hub() {} + // Hub marks this type as a conversion hub. func (tr *Filesystem) Hub() {} +// Hub marks this type as a conversion hub. +func (tr *FilesystemIAMBinding) Hub() {} + // Hub marks this type as a conversion hub. func (tr *GpuCluster) Hub() {} +// Hub marks this type as a conversion hub. +func (tr *GpuClusterIAMBinding) Hub() {} + // Hub marks this type as a conversion hub. func (tr *Image) Hub() {} +// Hub marks this type as a conversion hub. 
+func (tr *ImageIAMBinding) Hub() {} + // Hub marks this type as a conversion hub. func (tr *Instance) Hub() {} // Hub marks this type as a conversion hub. func (tr *InstanceGroup) Hub() {} +// Hub marks this type as a conversion hub. +func (tr *InstanceIAMBinding) Hub() {} + // Hub marks this type as a conversion hub. func (tr *PlacementGroup) Hub() {} +// Hub marks this type as a conversion hub. +func (tr *PlacementGroupIAMBinding) Hub() {} + // Hub marks this type as a conversion hub. func (tr *Snapshot) Hub() {} +// Hub marks this type as a conversion hub. +func (tr *SnapshotIAMBinding) Hub() {} + // Hub marks this type as a conversion hub. func (tr *SnapshotSchedule) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SnapshotScheduleIAMBinding) Hub() {} diff --git a/apis/compute/v1alpha1/zz_generated.deepcopy.go b/apis/compute/v1alpha1/zz_generated.deepcopy.go index 4111d91..aa3ec91 100644 --- a/apis/compute/v1alpha1/zz_generated.deepcopy.go +++ b/apis/compute/v1alpha1/zz_generated.deepcopy.go @@ -1178,6 +1178,213 @@ func (in *Disk) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskIAMBinding) DeepCopyInto(out *DiskIAMBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskIAMBinding. +func (in *DiskIAMBinding) DeepCopy() *DiskIAMBinding { + if in == nil { + return nil + } + out := new(DiskIAMBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *DiskIAMBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskIAMBindingInitParameters) DeepCopyInto(out *DiskIAMBindingInitParameters) { + *out = *in + if in.DiskID != nil { + in, out := &in.DiskID, &out.DiskID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskIAMBindingInitParameters. +func (in *DiskIAMBindingInitParameters) DeepCopy() *DiskIAMBindingInitParameters { + if in == nil { + return nil + } + out := new(DiskIAMBindingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskIAMBindingList) DeepCopyInto(out *DiskIAMBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DiskIAMBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskIAMBindingList. +func (in *DiskIAMBindingList) DeepCopy() *DiskIAMBindingList { + if in == nil { + return nil + } + out := new(DiskIAMBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *DiskIAMBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskIAMBindingObservation) DeepCopyInto(out *DiskIAMBindingObservation) { + *out = *in + if in.DiskID != nil { + in, out := &in.DiskID, &out.DiskID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskIAMBindingObservation. +func (in *DiskIAMBindingObservation) DeepCopy() *DiskIAMBindingObservation { + if in == nil { + return nil + } + out := new(DiskIAMBindingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskIAMBindingParameters) DeepCopyInto(out *DiskIAMBindingParameters) { + *out = *in + if in.DiskID != nil { + in, out := &in.DiskID, &out.DiskID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskIAMBindingParameters. 
+func (in *DiskIAMBindingParameters) DeepCopy() *DiskIAMBindingParameters { + if in == nil { + return nil + } + out := new(DiskIAMBindingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskIAMBindingSpec) DeepCopyInto(out *DiskIAMBindingSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskIAMBindingSpec. +func (in *DiskIAMBindingSpec) DeepCopy() *DiskIAMBindingSpec { + if in == nil { + return nil + } + out := new(DiskIAMBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskIAMBindingStatus) DeepCopyInto(out *DiskIAMBindingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskIAMBindingStatus. +func (in *DiskIAMBindingStatus) DeepCopy() *DiskIAMBindingStatus { + if in == nil { + return nil + } + out := new(DiskIAMBindingStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DiskInitParameters) DeepCopyInto(out *DiskInitParameters) { *out = *in @@ -1568,92 +1775,94 @@ func (in *DiskPlacementGroup) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *DiskPlacementGroupInitParameters) DeepCopyInto(out *DiskPlacementGroupInitParameters) { +func (in *DiskPlacementGroupIAMBinding) DeepCopyInto(out *DiskPlacementGroupIAMBinding) { *out = *in - if in.Description != nil { - in, out := &in.Description, &out.Description - *out = new(string) - **out = **in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskPlacementGroupIAMBinding. +func (in *DiskPlacementGroupIAMBinding) DeepCopy() *DiskPlacementGroupIAMBinding { + if in == nil { + return nil } - if in.FolderID != nil { - in, out := &in.FolderID, &out.FolderID + out := new(DiskPlacementGroupIAMBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DiskPlacementGroupIAMBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DiskPlacementGroupIAMBindingInitParameters) DeepCopyInto(out *DiskPlacementGroupIAMBindingInitParameters) { + *out = *in + if in.DiskPlacementGroupID != nil { + in, out := &in.DiskPlacementGroupID, &out.DiskPlacementGroupID *out = new(string) **out = **in } - if in.FolderIDRef != nil { - in, out := &in.FolderIDRef, &out.FolderIDRef - *out = new(v1.Reference) - (*in).DeepCopyInto(*out) - } - if in.FolderIDSelector != nil { - in, out := &in.FolderIDSelector, &out.FolderIDSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) - } - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(map[string]*string, len(*in)) - for key, val := range *in { - var outVal *string - if val == nil { - (*out)[key] = nil - } else { - inVal := (*in)[key] - in, out := &inVal, &outVal + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] *out = new(string) **out = **in } - (*out)[key] = outVal } } - if in.Name != nil { - in, out := &in.Name, &out.Name - *out = new(string) - **out = **in - } - if in.Zone != nil { - in, out := &in.Zone, &out.Zone + if in.Role != nil { + in, out := &in.Role, &out.Role *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskPlacementGroupInitParameters. -func (in *DiskPlacementGroupInitParameters) DeepCopy() *DiskPlacementGroupInitParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskPlacementGroupIAMBindingInitParameters. 
+func (in *DiskPlacementGroupIAMBindingInitParameters) DeepCopy() *DiskPlacementGroupIAMBindingInitParameters { if in == nil { return nil } - out := new(DiskPlacementGroupInitParameters) + out := new(DiskPlacementGroupIAMBindingInitParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DiskPlacementGroupList) DeepCopyInto(out *DiskPlacementGroupList) { +func (in *DiskPlacementGroupIAMBindingList) DeepCopyInto(out *DiskPlacementGroupIAMBindingList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]DiskPlacementGroup, len(*in)) + *out = make([]DiskPlacementGroupIAMBinding, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskPlacementGroupList. -func (in *DiskPlacementGroupList) DeepCopy() *DiskPlacementGroupList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskPlacementGroupIAMBindingList. +func (in *DiskPlacementGroupIAMBindingList) DeepCopy() *DiskPlacementGroupIAMBindingList { if in == nil { return nil } - out := new(DiskPlacementGroupList) + out := new(DiskPlacementGroupIAMBindingList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *DiskPlacementGroupList) DeepCopyObject() runtime.Object { +func (in *DiskPlacementGroupIAMBindingList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -1661,24 +1870,229 @@ func (in *DiskPlacementGroupList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *DiskPlacementGroupObservation) DeepCopyInto(out *DiskPlacementGroupObservation) { +func (in *DiskPlacementGroupIAMBindingObservation) DeepCopyInto(out *DiskPlacementGroupIAMBindingObservation) { *out = *in - if in.CreatedAt != nil { - in, out := &in.CreatedAt, &out.CreatedAt + if in.DiskPlacementGroupID != nil { + in, out := &in.DiskPlacementGroupID, &out.DiskPlacementGroupID *out = new(string) **out = **in } - if in.Description != nil { - in, out := &in.Description, &out.Description + if in.ID != nil { + in, out := &in.ID, &out.ID *out = new(string) **out = **in } - if in.FolderID != nil { - in, out := &in.FolderID, &out.FolderID + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role *out = new(string) **out = **in } - if in.ID != nil { +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskPlacementGroupIAMBindingObservation. +func (in *DiskPlacementGroupIAMBindingObservation) DeepCopy() *DiskPlacementGroupIAMBindingObservation { + if in == nil { + return nil + } + out := new(DiskPlacementGroupIAMBindingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DiskPlacementGroupIAMBindingParameters) DeepCopyInto(out *DiskPlacementGroupIAMBindingParameters) { + *out = *in + if in.DiskPlacementGroupID != nil { + in, out := &in.DiskPlacementGroupID, &out.DiskPlacementGroupID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskPlacementGroupIAMBindingParameters. +func (in *DiskPlacementGroupIAMBindingParameters) DeepCopy() *DiskPlacementGroupIAMBindingParameters { + if in == nil { + return nil + } + out := new(DiskPlacementGroupIAMBindingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskPlacementGroupIAMBindingSpec) DeepCopyInto(out *DiskPlacementGroupIAMBindingSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskPlacementGroupIAMBindingSpec. +func (in *DiskPlacementGroupIAMBindingSpec) DeepCopy() *DiskPlacementGroupIAMBindingSpec { + if in == nil { + return nil + } + out := new(DiskPlacementGroupIAMBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DiskPlacementGroupIAMBindingStatus) DeepCopyInto(out *DiskPlacementGroupIAMBindingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskPlacementGroupIAMBindingStatus. +func (in *DiskPlacementGroupIAMBindingStatus) DeepCopy() *DiskPlacementGroupIAMBindingStatus { + if in == nil { + return nil + } + out := new(DiskPlacementGroupIAMBindingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskPlacementGroupInitParameters) DeepCopyInto(out *DiskPlacementGroupInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskPlacementGroupInitParameters. 
+func (in *DiskPlacementGroupInitParameters) DeepCopy() *DiskPlacementGroupInitParameters { + if in == nil { + return nil + } + out := new(DiskPlacementGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskPlacementGroupList) DeepCopyInto(out *DiskPlacementGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DiskPlacementGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskPlacementGroupList. +func (in *DiskPlacementGroupList) DeepCopy() *DiskPlacementGroupList { + if in == nil { + return nil + } + out := new(DiskPlacementGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DiskPlacementGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DiskPlacementGroupObservation) DeepCopyInto(out *DiskPlacementGroupObservation) { + *out = *in + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { in, out := &in.ID, &out.ID *out = new(string) **out = **in @@ -1964,6 +2378,213 @@ func (in *Filesystem) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilesystemIAMBinding) DeepCopyInto(out *FilesystemIAMBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemIAMBinding. +func (in *FilesystemIAMBinding) DeepCopy() *FilesystemIAMBinding { + if in == nil { + return nil + } + out := new(FilesystemIAMBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FilesystemIAMBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FilesystemIAMBindingInitParameters) DeepCopyInto(out *FilesystemIAMBindingInitParameters) { + *out = *in + if in.FilesystemID != nil { + in, out := &in.FilesystemID, &out.FilesystemID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemIAMBindingInitParameters. +func (in *FilesystemIAMBindingInitParameters) DeepCopy() *FilesystemIAMBindingInitParameters { + if in == nil { + return nil + } + out := new(FilesystemIAMBindingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilesystemIAMBindingList) DeepCopyInto(out *FilesystemIAMBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FilesystemIAMBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemIAMBindingList. +func (in *FilesystemIAMBindingList) DeepCopy() *FilesystemIAMBindingList { + if in == nil { + return nil + } + out := new(FilesystemIAMBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *FilesystemIAMBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilesystemIAMBindingObservation) DeepCopyInto(out *FilesystemIAMBindingObservation) { + *out = *in + if in.FilesystemID != nil { + in, out := &in.FilesystemID, &out.FilesystemID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemIAMBindingObservation. +func (in *FilesystemIAMBindingObservation) DeepCopy() *FilesystemIAMBindingObservation { + if in == nil { + return nil + } + out := new(FilesystemIAMBindingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FilesystemIAMBindingParameters) DeepCopyInto(out *FilesystemIAMBindingParameters) { + *out = *in + if in.FilesystemID != nil { + in, out := &in.FilesystemID, &out.FilesystemID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemIAMBindingParameters. +func (in *FilesystemIAMBindingParameters) DeepCopy() *FilesystemIAMBindingParameters { + if in == nil { + return nil + } + out := new(FilesystemIAMBindingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilesystemIAMBindingSpec) DeepCopyInto(out *FilesystemIAMBindingSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemIAMBindingSpec. +func (in *FilesystemIAMBindingSpec) DeepCopy() *FilesystemIAMBindingSpec { + if in == nil { + return nil + } + out := new(FilesystemIAMBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilesystemIAMBindingStatus) DeepCopyInto(out *FilesystemIAMBindingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemIAMBindingStatus. 
+func (in *FilesystemIAMBindingStatus) DeepCopy() *FilesystemIAMBindingStatus { + if in == nil { + return nil + } + out := new(FilesystemIAMBindingStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FilesystemInitParameters) DeepCopyInto(out *FilesystemInitParameters) { *out = *in @@ -2299,56 +2920,263 @@ func (in *FixedScaleObservation) DeepCopy() *FixedScaleObservation { if in == nil { return nil } - out := new(FixedScaleObservation) + out := new(FixedScaleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FixedScaleParameters) DeepCopyInto(out *FixedScaleParameters) { + *out = *in + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FixedScaleParameters. +func (in *FixedScaleParameters) DeepCopy() *FixedScaleParameters { + if in == nil { + return nil + } + out := new(FixedScaleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GpuCluster) DeepCopyInto(out *GpuCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GpuCluster. +func (in *GpuCluster) DeepCopy() *GpuCluster { + if in == nil { + return nil + } + out := new(GpuCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *GpuCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GpuClusterIAMBinding) DeepCopyInto(out *GpuClusterIAMBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GpuClusterIAMBinding. +func (in *GpuClusterIAMBinding) DeepCopy() *GpuClusterIAMBinding { + if in == nil { + return nil + } + out := new(GpuClusterIAMBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GpuClusterIAMBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GpuClusterIAMBindingInitParameters) DeepCopyInto(out *GpuClusterIAMBindingInitParameters) { + *out = *in + if in.GpuClusterID != nil { + in, out := &in.GpuClusterID, &out.GpuClusterID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GpuClusterIAMBindingInitParameters. 
+func (in *GpuClusterIAMBindingInitParameters) DeepCopy() *GpuClusterIAMBindingInitParameters { + if in == nil { + return nil + } + out := new(GpuClusterIAMBindingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GpuClusterIAMBindingList) DeepCopyInto(out *GpuClusterIAMBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]GpuClusterIAMBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GpuClusterIAMBindingList. +func (in *GpuClusterIAMBindingList) DeepCopy() *GpuClusterIAMBindingList { + if in == nil { + return nil + } + out := new(GpuClusterIAMBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GpuClusterIAMBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GpuClusterIAMBindingObservation) DeepCopyInto(out *GpuClusterIAMBindingObservation) { + *out = *in + if in.GpuClusterID != nil { + in, out := &in.GpuClusterID, &out.GpuClusterID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GpuClusterIAMBindingObservation. +func (in *GpuClusterIAMBindingObservation) DeepCopy() *GpuClusterIAMBindingObservation { + if in == nil { + return nil + } + out := new(GpuClusterIAMBindingObservation) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FixedScaleParameters) DeepCopyInto(out *FixedScaleParameters) { +func (in *GpuClusterIAMBindingParameters) DeepCopyInto(out *GpuClusterIAMBindingParameters) { *out = *in - if in.Size != nil { - in, out := &in.Size, &out.Size - *out = new(float64) + if in.GpuClusterID != nil { + in, out := &in.GpuClusterID, &out.GpuClusterID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FixedScaleParameters. 
-func (in *FixedScaleParameters) DeepCopy() *FixedScaleParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GpuClusterIAMBindingParameters. +func (in *GpuClusterIAMBindingParameters) DeepCopy() *GpuClusterIAMBindingParameters { if in == nil { return nil } - out := new(FixedScaleParameters) + out := new(GpuClusterIAMBindingParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GpuCluster) DeepCopyInto(out *GpuCluster) { +func (in *GpuClusterIAMBindingSpec) DeepCopyInto(out *GpuClusterIAMBindingSpec) { *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GpuCluster. -func (in *GpuCluster) DeepCopy() *GpuCluster { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GpuClusterIAMBindingSpec. +func (in *GpuClusterIAMBindingSpec) DeepCopy() *GpuClusterIAMBindingSpec { if in == nil { return nil } - out := new(GpuCluster) + out := new(GpuClusterIAMBindingSpec) in.DeepCopyInto(out) return out } -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *GpuCluster) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GpuClusterIAMBindingStatus) DeepCopyInto(out *GpuClusterIAMBindingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GpuClusterIAMBindingStatus. +func (in *GpuClusterIAMBindingStatus) DeepCopy() *GpuClusterIAMBindingStatus { + if in == nil { + return nil } - return nil + out := new(GpuClusterIAMBindingStatus) + in.DeepCopyInto(out) + return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -2932,157 +3760,364 @@ func (in *HostAffinityRulesParameters) DeepCopyInto(out *HostAffinityRulesParame in, out := &in.Values, &out.Values *out = make([]*string, len(*in)) for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(string) - **out = **in - } + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostAffinityRulesParameters. +func (in *HostAffinityRulesParameters) DeepCopy() *HostAffinityRulesParameters { + if in == nil { + return nil + } + out := new(HostAffinityRulesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPv6DNSRecordInitParameters) DeepCopyInto(out *IPv6DNSRecordInitParameters) { + *out = *in + if in.DNSZoneID != nil { + in, out := &in.DNSZoneID, &out.DNSZoneID + *out = new(string) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.Ptr != nil { + in, out := &in.Ptr, &out.Ptr + *out = new(bool) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv6DNSRecordInitParameters. +func (in *IPv6DNSRecordInitParameters) DeepCopy() *IPv6DNSRecordInitParameters { + if in == nil { + return nil + } + out := new(IPv6DNSRecordInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPv6DNSRecordObservation) DeepCopyInto(out *IPv6DNSRecordObservation) { + *out = *in + if in.DNSZoneID != nil { + in, out := &in.DNSZoneID, &out.DNSZoneID + *out = new(string) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.Ptr != nil { + in, out := &in.Ptr, &out.Ptr + *out = new(bool) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv6DNSRecordObservation. +func (in *IPv6DNSRecordObservation) DeepCopy() *IPv6DNSRecordObservation { + if in == nil { + return nil + } + out := new(IPv6DNSRecordObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPv6DNSRecordParameters) DeepCopyInto(out *IPv6DNSRecordParameters) { + *out = *in + if in.DNSZoneID != nil { + in, out := &in.DNSZoneID, &out.DNSZoneID + *out = new(string) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.Ptr != nil { + in, out := &in.Ptr, &out.Ptr + *out = new(bool) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv6DNSRecordParameters. +func (in *IPv6DNSRecordParameters) DeepCopy() *IPv6DNSRecordParameters { + if in == nil { + return nil + } + out := new(IPv6DNSRecordParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Image) DeepCopyInto(out *Image) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image. +func (in *Image) DeepCopy() *Image { + if in == nil { + return nil + } + out := new(Image) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Image) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageIAMBinding) DeepCopyInto(out *ImageIAMBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageIAMBinding. +func (in *ImageIAMBinding) DeepCopy() *ImageIAMBinding { + if in == nil { + return nil + } + out := new(ImageIAMBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageIAMBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageIAMBindingInitParameters) DeepCopyInto(out *ImageIAMBindingInitParameters) { + *out = *in + if in.ImageID != nil { + in, out := &in.ImageID, &out.ImageID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageIAMBindingInitParameters. +func (in *ImageIAMBindingInitParameters) DeepCopy() *ImageIAMBindingInitParameters { + if in == nil { + return nil + } + out := new(ImageIAMBindingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageIAMBindingList) DeepCopyInto(out *ImageIAMBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ImageIAMBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostAffinityRulesParameters. -func (in *HostAffinityRulesParameters) DeepCopy() *HostAffinityRulesParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageIAMBindingList. +func (in *ImageIAMBindingList) DeepCopy() *ImageIAMBindingList { if in == nil { return nil } - out := new(HostAffinityRulesParameters) + out := new(ImageIAMBindingList) in.DeepCopyInto(out) return out } +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageIAMBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *IPv6DNSRecordInitParameters) DeepCopyInto(out *IPv6DNSRecordInitParameters) { +func (in *ImageIAMBindingObservation) DeepCopyInto(out *ImageIAMBindingObservation) { *out = *in - if in.DNSZoneID != nil { - in, out := &in.DNSZoneID, &out.DNSZoneID + if in.ID != nil { + in, out := &in.ID, &out.ID *out = new(string) **out = **in } - if in.Fqdn != nil { - in, out := &in.Fqdn, &out.Fqdn + if in.ImageID != nil { + in, out := &in.ImageID, &out.ImageID *out = new(string) **out = **in } - if in.Ptr != nil { - in, out := &in.Ptr, &out.Ptr - *out = new(bool) - **out = **in + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } } - if in.TTL != nil { - in, out := &in.TTL, &out.TTL - *out = new(float64) + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv6DNSRecordInitParameters. -func (in *IPv6DNSRecordInitParameters) DeepCopy() *IPv6DNSRecordInitParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageIAMBindingObservation. +func (in *ImageIAMBindingObservation) DeepCopy() *ImageIAMBindingObservation { if in == nil { return nil } - out := new(IPv6DNSRecordInitParameters) + out := new(ImageIAMBindingObservation) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *IPv6DNSRecordObservation) DeepCopyInto(out *IPv6DNSRecordObservation) { +func (in *ImageIAMBindingParameters) DeepCopyInto(out *ImageIAMBindingParameters) { *out = *in - if in.DNSZoneID != nil { - in, out := &in.DNSZoneID, &out.DNSZoneID - *out = new(string) - **out = **in - } - if in.Fqdn != nil { - in, out := &in.Fqdn, &out.Fqdn + if in.ImageID != nil { + in, out := &in.ImageID, &out.ImageID *out = new(string) **out = **in } - if in.Ptr != nil { - in, out := &in.Ptr, &out.Ptr - *out = new(bool) - **out = **in + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } } - if in.TTL != nil { - in, out := &in.TTL, &out.TTL - *out = new(float64) + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv6DNSRecordObservation. -func (in *IPv6DNSRecordObservation) DeepCopy() *IPv6DNSRecordObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageIAMBindingParameters. +func (in *ImageIAMBindingParameters) DeepCopy() *ImageIAMBindingParameters { if in == nil { return nil } - out := new(IPv6DNSRecordObservation) + out := new(ImageIAMBindingParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *IPv6DNSRecordParameters) DeepCopyInto(out *IPv6DNSRecordParameters) { +func (in *ImageIAMBindingSpec) DeepCopyInto(out *ImageIAMBindingSpec) { *out = *in - if in.DNSZoneID != nil { - in, out := &in.DNSZoneID, &out.DNSZoneID - *out = new(string) - **out = **in - } - if in.Fqdn != nil { - in, out := &in.Fqdn, &out.Fqdn - *out = new(string) - **out = **in - } - if in.Ptr != nil { - in, out := &in.Ptr, &out.Ptr - *out = new(bool) - **out = **in - } - if in.TTL != nil { - in, out := &in.TTL, &out.TTL - *out = new(float64) - **out = **in - } + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv6DNSRecordParameters. -func (in *IPv6DNSRecordParameters) DeepCopy() *IPv6DNSRecordParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageIAMBindingSpec. +func (in *ImageIAMBindingSpec) DeepCopy() *ImageIAMBindingSpec { if in == nil { return nil } - out := new(IPv6DNSRecordParameters) + out := new(ImageIAMBindingSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Image) DeepCopyInto(out *Image) { +func (in *ImageIAMBindingStatus) DeepCopyInto(out *ImageIAMBindingStatus) { *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image. -func (in *Image) DeepCopy() *Image { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageIAMBindingStatus. 
+func (in *ImageIAMBindingStatus) DeepCopy() *ImageIAMBindingStatus { if in == nil { return nil } - out := new(Image) + out := new(ImageIAMBindingStatus) in.DeepCopyInto(out) return out } -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Image) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ImageInitParameters) DeepCopyInto(out *ImageInitParameters) { *out = *in @@ -4212,107 +5247,314 @@ func (in *InstanceGroupParameters) DeepCopyInto(out *InstanceGroupParameters) { *out = new(string) **out = **in } - (*out)[key] = outVal + (*out)[key] = outVal + } + } + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = make([]LoadBalancerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaxCheckingHealthDuration != nil { + in, out := &in.MaxCheckingHealthDuration, &out.MaxCheckingHealthDuration + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ScalePolicy != nil { + in, out := &in.ScalePolicy, &out.ScalePolicy + *out = make([]ScalePolicyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.ServiceAccountIDRef != nil { + in, out := &in.ServiceAccountIDRef, &out.ServiceAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountIDSelector != nil { + in, out := &in.ServiceAccountIDSelector, &out.ServiceAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Variables != nil { + in, out := &in.Variables, &out.Variables + *out = 
make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceGroupParameters. +func (in *InstanceGroupParameters) DeepCopy() *InstanceGroupParameters { + if in == nil { + return nil + } + out := new(InstanceGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceGroupSpec) DeepCopyInto(out *InstanceGroupSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceGroupSpec. +func (in *InstanceGroupSpec) DeepCopy() *InstanceGroupSpec { + if in == nil { + return nil + } + out := new(InstanceGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceGroupStatus) DeepCopyInto(out *InstanceGroupStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceGroupStatus. +func (in *InstanceGroupStatus) DeepCopy() *InstanceGroupStatus { + if in == nil { + return nil + } + out := new(InstanceGroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceIAMBinding) DeepCopyInto(out *InstanceIAMBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceIAMBinding. +func (in *InstanceIAMBinding) DeepCopy() *InstanceIAMBinding { + if in == nil { + return nil + } + out := new(InstanceIAMBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InstanceIAMBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceIAMBindingInitParameters) DeepCopyInto(out *InstanceIAMBindingInitParameters) { + *out = *in + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } } } - if in.LoadBalancer != nil { - in, out := &in.LoadBalancer, &out.LoadBalancer - *out = make([]LoadBalancerParameters, len(*in)) + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceIAMBindingInitParameters. +func (in *InstanceIAMBindingInitParameters) DeepCopy() *InstanceIAMBindingInitParameters { + if in == nil { + return nil + } + out := new(InstanceIAMBindingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *InstanceIAMBindingList) DeepCopyInto(out *InstanceIAMBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]InstanceIAMBinding, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.MaxCheckingHealthDuration != nil { - in, out := &in.MaxCheckingHealthDuration, &out.MaxCheckingHealthDuration - *out = new(float64) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceIAMBindingList. +func (in *InstanceIAMBindingList) DeepCopy() *InstanceIAMBindingList { + if in == nil { + return nil + } + out := new(InstanceIAMBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InstanceIAMBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceIAMBindingObservation) DeepCopyInto(out *InstanceIAMBindingObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) **out = **in } - if in.Name != nil { - in, out := &in.Name, &out.Name + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID *out = new(string) **out = **in } - if in.ScalePolicy != nil { - in, out := &in.ScalePolicy, &out.ScalePolicy - *out = make([]ScalePolicyParameters, len(*in)) + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } } } - if in.ServiceAccountID != nil { - in, out := &in.ServiceAccountID, &out.ServiceAccountID + if in.Role != nil { + in, out := &in.Role, &out.Role *out = new(string) **out = **in } - if in.ServiceAccountIDRef != nil { - in, out := &in.ServiceAccountIDRef, &out.ServiceAccountIDRef - *out = new(v1.Reference) - (*in).DeepCopyInto(*out) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceIAMBindingObservation. +func (in *InstanceIAMBindingObservation) DeepCopy() *InstanceIAMBindingObservation { + if in == nil { + return nil } - if in.ServiceAccountIDSelector != nil { - in, out := &in.ServiceAccountIDSelector, &out.ServiceAccountIDSelector - *out = new(v1.Selector) - (*in).DeepCopyInto(*out) + out := new(InstanceIAMBindingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceIAMBindingParameters) DeepCopyInto(out *InstanceIAMBindingParameters) { + *out = *in + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in } - if in.Variables != nil { - in, out := &in.Variables, &out.Variables - *out = make(map[string]*string, len(*in)) - for key, val := range *in { - var outVal *string - if val == nil { - (*out)[key] = nil - } else { - inVal := (*in)[key] - in, out := &inVal, &outVal + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] *out = new(string) **out = **in } - (*out)[key] = outVal } } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceGroupParameters. -func (in *InstanceGroupParameters) DeepCopy() *InstanceGroupParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceIAMBindingParameters. +func (in *InstanceIAMBindingParameters) DeepCopy() *InstanceIAMBindingParameters { if in == nil { return nil } - out := new(InstanceGroupParameters) + out := new(InstanceIAMBindingParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InstanceGroupSpec) DeepCopyInto(out *InstanceGroupSpec) { +func (in *InstanceIAMBindingSpec) DeepCopyInto(out *InstanceIAMBindingSpec) { *out = *in in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) in.ForProvider.DeepCopyInto(&out.ForProvider) in.InitProvider.DeepCopyInto(&out.InitProvider) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceGroupSpec. 
-func (in *InstanceGroupSpec) DeepCopy() *InstanceGroupSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceIAMBindingSpec. +func (in *InstanceIAMBindingSpec) DeepCopy() *InstanceIAMBindingSpec { if in == nil { return nil } - out := new(InstanceGroupSpec) + out := new(InstanceIAMBindingSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InstanceGroupStatus) DeepCopyInto(out *InstanceGroupStatus) { +func (in *InstanceIAMBindingStatus) DeepCopyInto(out *InstanceIAMBindingStatus) { *out = *in in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) in.AtProvider.DeepCopyInto(&out.AtProvider) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceGroupStatus. -func (in *InstanceGroupStatus) DeepCopy() *InstanceGroupStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceIAMBindingStatus. +func (in *InstanceIAMBindingStatus) DeepCopy() *InstanceIAMBindingStatus { if in == nil { return nil } - out := new(InstanceGroupStatus) + out := new(InstanceIAMBindingStatus) in.DeepCopyInto(out) return out } @@ -7736,96 +8978,303 @@ func (in *NetworkInterfaceParameters) DeepCopy() *NetworkInterfaceParameters { if in == nil { return nil } - out := new(NetworkInterfaceParameters) + out := new(NetworkInterfaceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSettingsInitParameters) DeepCopyInto(out *NetworkSettingsInitParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSettingsInitParameters. 
+func (in *NetworkSettingsInitParameters) DeepCopy() *NetworkSettingsInitParameters { + if in == nil { + return nil + } + out := new(NetworkSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSettingsObservation) DeepCopyInto(out *NetworkSettingsObservation) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSettingsObservation. +func (in *NetworkSettingsObservation) DeepCopy() *NetworkSettingsObservation { + if in == nil { + return nil + } + out := new(NetworkSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSettingsParameters) DeepCopyInto(out *NetworkSettingsParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSettingsParameters. +func (in *NetworkSettingsParameters) DeepCopy() *NetworkSettingsParameters { + if in == nil { + return nil + } + out := new(NetworkSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementGroup) DeepCopyInto(out *PlacementGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementGroup. 
+func (in *PlacementGroup) DeepCopy() *PlacementGroup { + if in == nil { + return nil + } + out := new(PlacementGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PlacementGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementGroupIAMBinding) DeepCopyInto(out *PlacementGroupIAMBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementGroupIAMBinding. +func (in *PlacementGroupIAMBinding) DeepCopy() *PlacementGroupIAMBinding { + if in == nil { + return nil + } + out := new(PlacementGroupIAMBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PlacementGroupIAMBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlacementGroupIAMBindingInitParameters) DeepCopyInto(out *PlacementGroupIAMBindingInitParameters) { + *out = *in + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PlacementGroupID != nil { + in, out := &in.PlacementGroupID, &out.PlacementGroupID + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementGroupIAMBindingInitParameters. +func (in *PlacementGroupIAMBindingInitParameters) DeepCopy() *PlacementGroupIAMBindingInitParameters { + if in == nil { + return nil + } + out := new(PlacementGroupIAMBindingInitParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NetworkSettingsInitParameters) DeepCopyInto(out *NetworkSettingsInitParameters) { +func (in *PlacementGroupIAMBindingList) DeepCopyInto(out *PlacementGroupIAMBindingList) { *out = *in - if in.Type != nil { - in, out := &in.Type, &out.Type - *out = new(string) - **out = **in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PlacementGroupIAMBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSettingsInitParameters. -func (in *NetworkSettingsInitParameters) DeepCopy() *NetworkSettingsInitParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementGroupIAMBindingList. 
+func (in *PlacementGroupIAMBindingList) DeepCopy() *PlacementGroupIAMBindingList { if in == nil { return nil } - out := new(NetworkSettingsInitParameters) + out := new(PlacementGroupIAMBindingList) in.DeepCopyInto(out) return out } +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PlacementGroupIAMBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NetworkSettingsObservation) DeepCopyInto(out *NetworkSettingsObservation) { +func (in *PlacementGroupIAMBindingObservation) DeepCopyInto(out *PlacementGroupIAMBindingObservation) { *out = *in - if in.Type != nil { - in, out := &in.Type, &out.Type + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PlacementGroupID != nil { + in, out := &in.PlacementGroupID, &out.PlacementGroupID + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSettingsObservation. -func (in *NetworkSettingsObservation) DeepCopy() *NetworkSettingsObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementGroupIAMBindingObservation. 
+func (in *PlacementGroupIAMBindingObservation) DeepCopy() *PlacementGroupIAMBindingObservation { if in == nil { return nil } - out := new(NetworkSettingsObservation) + out := new(PlacementGroupIAMBindingObservation) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NetworkSettingsParameters) DeepCopyInto(out *NetworkSettingsParameters) { +func (in *PlacementGroupIAMBindingParameters) DeepCopyInto(out *PlacementGroupIAMBindingParameters) { *out = *in - if in.Type != nil { - in, out := &in.Type, &out.Type + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PlacementGroupID != nil { + in, out := &in.PlacementGroupID, &out.PlacementGroupID + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSettingsParameters. -func (in *NetworkSettingsParameters) DeepCopy() *NetworkSettingsParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementGroupIAMBindingParameters. +func (in *PlacementGroupIAMBindingParameters) DeepCopy() *PlacementGroupIAMBindingParameters { if in == nil { return nil } - out := new(NetworkSettingsParameters) + out := new(PlacementGroupIAMBindingParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PlacementGroup) DeepCopyInto(out *PlacementGroup) { +func (in *PlacementGroupIAMBindingSpec) DeepCopyInto(out *PlacementGroupIAMBindingSpec) { *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementGroup. -func (in *PlacementGroup) DeepCopy() *PlacementGroup { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementGroupIAMBindingSpec. +func (in *PlacementGroupIAMBindingSpec) DeepCopy() *PlacementGroupIAMBindingSpec { if in == nil { return nil } - out := new(PlacementGroup) + out := new(PlacementGroupIAMBindingSpec) in.DeepCopyInto(out) return out } -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PlacementGroup) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementGroupIAMBindingStatus) DeepCopyInto(out *PlacementGroupIAMBindingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementGroupIAMBindingStatus. +func (in *PlacementGroupIAMBindingStatus) DeepCopy() *PlacementGroupIAMBindingStatus { + if in == nil { + return nil } - return nil + out := new(PlacementGroupIAMBindingStatus) + in.DeepCopyInto(out) + return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
@@ -8682,111 +10131,318 @@ func (in *SecondaryDiskInitializeParamsParameters) DeepCopyInto(out *SecondaryDi } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecondaryDiskInitializeParamsParameters. -func (in *SecondaryDiskInitializeParamsParameters) DeepCopy() *SecondaryDiskInitializeParamsParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecondaryDiskInitializeParamsParameters. +func (in *SecondaryDiskInitializeParamsParameters) DeepCopy() *SecondaryDiskInitializeParamsParameters { + if in == nil { + return nil + } + out := new(SecondaryDiskInitializeParamsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecondaryDiskObservation) DeepCopyInto(out *SecondaryDiskObservation) { + *out = *in + if in.AutoDelete != nil { + in, out := &in.AutoDelete, &out.AutoDelete + *out = new(bool) + **out = **in + } + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.DiskID != nil { + in, out := &in.DiskID, &out.DiskID + *out = new(string) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecondaryDiskObservation. +func (in *SecondaryDiskObservation) DeepCopy() *SecondaryDiskObservation { + if in == nil { + return nil + } + out := new(SecondaryDiskObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecondaryDiskParameters) DeepCopyInto(out *SecondaryDiskParameters) { + *out = *in + if in.AutoDelete != nil { + in, out := &in.AutoDelete, &out.AutoDelete + *out = new(bool) + **out = **in + } + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.DiskID != nil { + in, out := &in.DiskID, &out.DiskID + *out = new(string) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecondaryDiskParameters. +func (in *SecondaryDiskParameters) DeepCopy() *SecondaryDiskParameters { + if in == nil { + return nil + } + out := new(SecondaryDiskParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Snapshot) DeepCopyInto(out *Snapshot) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Snapshot. +func (in *Snapshot) DeepCopy() *Snapshot { + if in == nil { + return nil + } + out := new(Snapshot) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Snapshot) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnapshotIAMBinding) DeepCopyInto(out *SnapshotIAMBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotIAMBinding. +func (in *SnapshotIAMBinding) DeepCopy() *SnapshotIAMBinding { + if in == nil { + return nil + } + out := new(SnapshotIAMBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SnapshotIAMBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotIAMBindingInitParameters) DeepCopyInto(out *SnapshotIAMBindingInitParameters) { + *out = *in + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SnapshotID != nil { + in, out := &in.SnapshotID, &out.SnapshotID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotIAMBindingInitParameters. +func (in *SnapshotIAMBindingInitParameters) DeepCopy() *SnapshotIAMBindingInitParameters { + if in == nil { + return nil + } + out := new(SnapshotIAMBindingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnapshotIAMBindingList) DeepCopyInto(out *SnapshotIAMBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SnapshotIAMBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotIAMBindingList. +func (in *SnapshotIAMBindingList) DeepCopy() *SnapshotIAMBindingList { if in == nil { return nil } - out := new(SecondaryDiskInitializeParamsParameters) + out := new(SnapshotIAMBindingList) in.DeepCopyInto(out) return out } +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SnapshotIAMBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *SecondaryDiskObservation) DeepCopyInto(out *SecondaryDiskObservation) { +func (in *SnapshotIAMBindingObservation) DeepCopyInto(out *SnapshotIAMBindingObservation) { *out = *in - if in.AutoDelete != nil { - in, out := &in.AutoDelete, &out.AutoDelete - *out = new(bool) - **out = **in - } - if in.DeviceName != nil { - in, out := &in.DeviceName, &out.DeviceName + if in.ID != nil { + in, out := &in.ID, &out.ID *out = new(string) **out = **in } - if in.DiskID != nil { - in, out := &in.DiskID, &out.DiskID + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role *out = new(string) **out = **in } - if in.Mode != nil { - in, out := &in.Mode, &out.Mode + if in.SnapshotID != nil { + in, out := &in.SnapshotID, &out.SnapshotID *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecondaryDiskObservation. -func (in *SecondaryDiskObservation) DeepCopy() *SecondaryDiskObservation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotIAMBindingObservation. +func (in *SnapshotIAMBindingObservation) DeepCopy() *SnapshotIAMBindingObservation { if in == nil { return nil } - out := new(SecondaryDiskObservation) + out := new(SnapshotIAMBindingObservation) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *SecondaryDiskParameters) DeepCopyInto(out *SecondaryDiskParameters) { +func (in *SnapshotIAMBindingParameters) DeepCopyInto(out *SnapshotIAMBindingParameters) { *out = *in - if in.AutoDelete != nil { - in, out := &in.AutoDelete, &out.AutoDelete - *out = new(bool) - **out = **in - } - if in.DeviceName != nil { - in, out := &in.DeviceName, &out.DeviceName - *out = new(string) - **out = **in + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } } - if in.DiskID != nil { - in, out := &in.DiskID, &out.DiskID + if in.Role != nil { + in, out := &in.Role, &out.Role *out = new(string) **out = **in } - if in.Mode != nil { - in, out := &in.Mode, &out.Mode + if in.SnapshotID != nil { + in, out := &in.SnapshotID, &out.SnapshotID *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecondaryDiskParameters. -func (in *SecondaryDiskParameters) DeepCopy() *SecondaryDiskParameters { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotIAMBindingParameters. +func (in *SnapshotIAMBindingParameters) DeepCopy() *SnapshotIAMBindingParameters { if in == nil { return nil } - out := new(SecondaryDiskParameters) + out := new(SnapshotIAMBindingParameters) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Snapshot) DeepCopyInto(out *Snapshot) { +func (in *SnapshotIAMBindingSpec) DeepCopyInto(out *SnapshotIAMBindingSpec) { *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Snapshot. -func (in *Snapshot) DeepCopy() *Snapshot { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotIAMBindingSpec. +func (in *SnapshotIAMBindingSpec) DeepCopy() *SnapshotIAMBindingSpec { if in == nil { return nil } - out := new(Snapshot) + out := new(SnapshotIAMBindingSpec) in.DeepCopyInto(out) return out } -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Snapshot) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotIAMBindingStatus) DeepCopyInto(out *SnapshotIAMBindingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotIAMBindingStatus. +func (in *SnapshotIAMBindingStatus) DeepCopy() *SnapshotIAMBindingStatus { + if in == nil { + return nil } - return nil + out := new(SnapshotIAMBindingStatus) + in.DeepCopyInto(out) + return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
@@ -9061,6 +10717,213 @@ func (in *SnapshotSchedule) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotScheduleIAMBinding) DeepCopyInto(out *SnapshotScheduleIAMBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotScheduleIAMBinding. +func (in *SnapshotScheduleIAMBinding) DeepCopy() *SnapshotScheduleIAMBinding { + if in == nil { + return nil + } + out := new(SnapshotScheduleIAMBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SnapshotScheduleIAMBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotScheduleIAMBindingInitParameters) DeepCopyInto(out *SnapshotScheduleIAMBindingInitParameters) { + *out = *in + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SnapshotScheduleID != nil { + in, out := &in.SnapshotScheduleID, &out.SnapshotScheduleID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotScheduleIAMBindingInitParameters. 
+func (in *SnapshotScheduleIAMBindingInitParameters) DeepCopy() *SnapshotScheduleIAMBindingInitParameters { + if in == nil { + return nil + } + out := new(SnapshotScheduleIAMBindingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotScheduleIAMBindingList) DeepCopyInto(out *SnapshotScheduleIAMBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SnapshotScheduleIAMBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotScheduleIAMBindingList. +func (in *SnapshotScheduleIAMBindingList) DeepCopy() *SnapshotScheduleIAMBindingList { + if in == nil { + return nil + } + out := new(SnapshotScheduleIAMBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SnapshotScheduleIAMBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnapshotScheduleIAMBindingObservation) DeepCopyInto(out *SnapshotScheduleIAMBindingObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SnapshotScheduleID != nil { + in, out := &in.SnapshotScheduleID, &out.SnapshotScheduleID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotScheduleIAMBindingObservation. +func (in *SnapshotScheduleIAMBindingObservation) DeepCopy() *SnapshotScheduleIAMBindingObservation { + if in == nil { + return nil + } + out := new(SnapshotScheduleIAMBindingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotScheduleIAMBindingParameters) DeepCopyInto(out *SnapshotScheduleIAMBindingParameters) { + *out = *in + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SnapshotScheduleID != nil { + in, out := &in.SnapshotScheduleID, &out.SnapshotScheduleID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotScheduleIAMBindingParameters. 
+func (in *SnapshotScheduleIAMBindingParameters) DeepCopy() *SnapshotScheduleIAMBindingParameters { + if in == nil { + return nil + } + out := new(SnapshotScheduleIAMBindingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotScheduleIAMBindingSpec) DeepCopyInto(out *SnapshotScheduleIAMBindingSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotScheduleIAMBindingSpec. +func (in *SnapshotScheduleIAMBindingSpec) DeepCopy() *SnapshotScheduleIAMBindingSpec { + if in == nil { + return nil + } + out := new(SnapshotScheduleIAMBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotScheduleIAMBindingStatus) DeepCopyInto(out *SnapshotScheduleIAMBindingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotScheduleIAMBindingStatus. +func (in *SnapshotScheduleIAMBindingStatus) DeepCopy() *SnapshotScheduleIAMBindingStatus { + if in == nil { + return nil + } + out := new(SnapshotScheduleIAMBindingStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *SnapshotScheduleInitParameters) DeepCopyInto(out *SnapshotScheduleInitParameters) { *out = *in diff --git a/apis/compute/v1alpha1/zz_generated.managed.go b/apis/compute/v1alpha1/zz_generated.managed.go index c770abc..78b7ad3 100644 --- a/apis/compute/v1alpha1/zz_generated.managed.go +++ b/apis/compute/v1alpha1/zz_generated.managed.go @@ -64,6 +64,66 @@ func (mg *Disk) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { mg.Spec.WriteConnectionSecretToReference = r } +// GetCondition of this DiskIAMBinding. +func (mg *DiskIAMBinding) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this DiskIAMBinding. +func (mg *DiskIAMBinding) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this DiskIAMBinding. +func (mg *DiskIAMBinding) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this DiskIAMBinding. +func (mg *DiskIAMBinding) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this DiskIAMBinding. +func (mg *DiskIAMBinding) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this DiskIAMBinding. +func (mg *DiskIAMBinding) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this DiskIAMBinding. +func (mg *DiskIAMBinding) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this DiskIAMBinding. +func (mg *DiskIAMBinding) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this DiskIAMBinding. 
+func (mg *DiskIAMBinding) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this DiskIAMBinding. +func (mg *DiskIAMBinding) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this DiskIAMBinding. +func (mg *DiskIAMBinding) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this DiskIAMBinding. +func (mg *DiskIAMBinding) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + // GetCondition of this DiskPlacementGroup. func (mg *DiskPlacementGroup) GetCondition(ct xpv1.ConditionType) xpv1.Condition { return mg.Status.GetCondition(ct) @@ -124,6 +184,66 @@ func (mg *DiskPlacementGroup) SetWriteConnectionSecretToReference(r *xpv1.Secret mg.Spec.WriteConnectionSecretToReference = r } +// GetCondition of this DiskPlacementGroupIAMBinding. +func (mg *DiskPlacementGroupIAMBinding) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this DiskPlacementGroupIAMBinding. +func (mg *DiskPlacementGroupIAMBinding) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this DiskPlacementGroupIAMBinding. +func (mg *DiskPlacementGroupIAMBinding) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this DiskPlacementGroupIAMBinding. +func (mg *DiskPlacementGroupIAMBinding) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this DiskPlacementGroupIAMBinding. 
+func (mg *DiskPlacementGroupIAMBinding) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this DiskPlacementGroupIAMBinding. +func (mg *DiskPlacementGroupIAMBinding) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this DiskPlacementGroupIAMBinding. +func (mg *DiskPlacementGroupIAMBinding) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this DiskPlacementGroupIAMBinding. +func (mg *DiskPlacementGroupIAMBinding) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this DiskPlacementGroupIAMBinding. +func (mg *DiskPlacementGroupIAMBinding) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this DiskPlacementGroupIAMBinding. +func (mg *DiskPlacementGroupIAMBinding) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this DiskPlacementGroupIAMBinding. +func (mg *DiskPlacementGroupIAMBinding) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this DiskPlacementGroupIAMBinding. +func (mg *DiskPlacementGroupIAMBinding) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + // GetCondition of this Filesystem. func (mg *Filesystem) GetCondition(ct xpv1.ConditionType) xpv1.Condition { return mg.Status.GetCondition(ct) @@ -184,6 +304,66 @@ func (mg *Filesystem) SetWriteConnectionSecretToReference(r *xpv1.SecretReferenc mg.Spec.WriteConnectionSecretToReference = r } +// GetCondition of this FilesystemIAMBinding. 
+func (mg *FilesystemIAMBinding) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this FilesystemIAMBinding. +func (mg *FilesystemIAMBinding) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this FilesystemIAMBinding. +func (mg *FilesystemIAMBinding) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this FilesystemIAMBinding. +func (mg *FilesystemIAMBinding) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this FilesystemIAMBinding. +func (mg *FilesystemIAMBinding) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this FilesystemIAMBinding. +func (mg *FilesystemIAMBinding) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this FilesystemIAMBinding. +func (mg *FilesystemIAMBinding) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this FilesystemIAMBinding. +func (mg *FilesystemIAMBinding) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this FilesystemIAMBinding. +func (mg *FilesystemIAMBinding) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this FilesystemIAMBinding. +func (mg *FilesystemIAMBinding) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this FilesystemIAMBinding. 
+func (mg *FilesystemIAMBinding) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this FilesystemIAMBinding. +func (mg *FilesystemIAMBinding) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + // GetCondition of this GpuCluster. func (mg *GpuCluster) GetCondition(ct xpv1.ConditionType) xpv1.Condition { return mg.Status.GetCondition(ct) @@ -244,6 +424,66 @@ func (mg *GpuCluster) SetWriteConnectionSecretToReference(r *xpv1.SecretReferenc mg.Spec.WriteConnectionSecretToReference = r } +// GetCondition of this GpuClusterIAMBinding. +func (mg *GpuClusterIAMBinding) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this GpuClusterIAMBinding. +func (mg *GpuClusterIAMBinding) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this GpuClusterIAMBinding. +func (mg *GpuClusterIAMBinding) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this GpuClusterIAMBinding. +func (mg *GpuClusterIAMBinding) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this GpuClusterIAMBinding. +func (mg *GpuClusterIAMBinding) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this GpuClusterIAMBinding. +func (mg *GpuClusterIAMBinding) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this GpuClusterIAMBinding. +func (mg *GpuClusterIAMBinding) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) 
+} + +// SetDeletionPolicy of this GpuClusterIAMBinding. +func (mg *GpuClusterIAMBinding) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this GpuClusterIAMBinding. +func (mg *GpuClusterIAMBinding) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this GpuClusterIAMBinding. +func (mg *GpuClusterIAMBinding) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this GpuClusterIAMBinding. +func (mg *GpuClusterIAMBinding) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this GpuClusterIAMBinding. +func (mg *GpuClusterIAMBinding) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + // GetCondition of this Image. func (mg *Image) GetCondition(ct xpv1.ConditionType) xpv1.Condition { return mg.Status.GetCondition(ct) @@ -304,6 +544,66 @@ func (mg *Image) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { mg.Spec.WriteConnectionSecretToReference = r } +// GetCondition of this ImageIAMBinding. +func (mg *ImageIAMBinding) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ImageIAMBinding. +func (mg *ImageIAMBinding) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ImageIAMBinding. +func (mg *ImageIAMBinding) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ImageIAMBinding. +func (mg *ImageIAMBinding) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ImageIAMBinding. 
+func (mg *ImageIAMBinding) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ImageIAMBinding. +func (mg *ImageIAMBinding) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ImageIAMBinding. +func (mg *ImageIAMBinding) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ImageIAMBinding. +func (mg *ImageIAMBinding) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ImageIAMBinding. +func (mg *ImageIAMBinding) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ImageIAMBinding. +func (mg *ImageIAMBinding) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ImageIAMBinding. +func (mg *ImageIAMBinding) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ImageIAMBinding. +func (mg *ImageIAMBinding) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + // GetCondition of this Instance. func (mg *Instance) GetCondition(ct xpv1.ConditionType) xpv1.Condition { return mg.Status.GetCondition(ct) @@ -424,6 +724,66 @@ func (mg *InstanceGroup) SetWriteConnectionSecretToReference(r *xpv1.SecretRefer mg.Spec.WriteConnectionSecretToReference = r } +// GetCondition of this InstanceIAMBinding. +func (mg *InstanceIAMBinding) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this InstanceIAMBinding. 
+func (mg *InstanceIAMBinding) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this InstanceIAMBinding. +func (mg *InstanceIAMBinding) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this InstanceIAMBinding. +func (mg *InstanceIAMBinding) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this InstanceIAMBinding. +func (mg *InstanceIAMBinding) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this InstanceIAMBinding. +func (mg *InstanceIAMBinding) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this InstanceIAMBinding. +func (mg *InstanceIAMBinding) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this InstanceIAMBinding. +func (mg *InstanceIAMBinding) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this InstanceIAMBinding. +func (mg *InstanceIAMBinding) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this InstanceIAMBinding. +func (mg *InstanceIAMBinding) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this InstanceIAMBinding. +func (mg *InstanceIAMBinding) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this InstanceIAMBinding. 
+func (mg *InstanceIAMBinding) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + // GetCondition of this PlacementGroup. func (mg *PlacementGroup) GetCondition(ct xpv1.ConditionType) xpv1.Condition { return mg.Status.GetCondition(ct) @@ -484,6 +844,66 @@ func (mg *PlacementGroup) SetWriteConnectionSecretToReference(r *xpv1.SecretRefe mg.Spec.WriteConnectionSecretToReference = r } +// GetCondition of this PlacementGroupIAMBinding. +func (mg *PlacementGroupIAMBinding) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this PlacementGroupIAMBinding. +func (mg *PlacementGroupIAMBinding) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this PlacementGroupIAMBinding. +func (mg *PlacementGroupIAMBinding) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this PlacementGroupIAMBinding. +func (mg *PlacementGroupIAMBinding) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this PlacementGroupIAMBinding. +func (mg *PlacementGroupIAMBinding) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this PlacementGroupIAMBinding. +func (mg *PlacementGroupIAMBinding) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this PlacementGroupIAMBinding. +func (mg *PlacementGroupIAMBinding) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this PlacementGroupIAMBinding. 
+func (mg *PlacementGroupIAMBinding) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this PlacementGroupIAMBinding. +func (mg *PlacementGroupIAMBinding) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this PlacementGroupIAMBinding. +func (mg *PlacementGroupIAMBinding) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this PlacementGroupIAMBinding. +func (mg *PlacementGroupIAMBinding) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this PlacementGroupIAMBinding. +func (mg *PlacementGroupIAMBinding) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + // GetCondition of this Snapshot. func (mg *Snapshot) GetCondition(ct xpv1.ConditionType) xpv1.Condition { return mg.Status.GetCondition(ct) @@ -544,6 +964,66 @@ func (mg *Snapshot) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) mg.Spec.WriteConnectionSecretToReference = r } +// GetCondition of this SnapshotIAMBinding. +func (mg *SnapshotIAMBinding) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SnapshotIAMBinding. +func (mg *SnapshotIAMBinding) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SnapshotIAMBinding. +func (mg *SnapshotIAMBinding) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SnapshotIAMBinding. +func (mg *SnapshotIAMBinding) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SnapshotIAMBinding. 
+func (mg *SnapshotIAMBinding) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SnapshotIAMBinding. +func (mg *SnapshotIAMBinding) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SnapshotIAMBinding. +func (mg *SnapshotIAMBinding) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SnapshotIAMBinding. +func (mg *SnapshotIAMBinding) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SnapshotIAMBinding. +func (mg *SnapshotIAMBinding) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SnapshotIAMBinding. +func (mg *SnapshotIAMBinding) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SnapshotIAMBinding. +func (mg *SnapshotIAMBinding) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SnapshotIAMBinding. +func (mg *SnapshotIAMBinding) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + // GetCondition of this SnapshotSchedule. func (mg *SnapshotSchedule) GetCondition(ct xpv1.ConditionType) xpv1.Condition { return mg.Status.GetCondition(ct) @@ -603,3 +1083,63 @@ func (mg *SnapshotSchedule) SetPublishConnectionDetailsTo(r *xpv1.PublishConnect func (mg *SnapshotSchedule) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { mg.Spec.WriteConnectionSecretToReference = r } + +// GetCondition of this SnapshotScheduleIAMBinding. 
+func (mg *SnapshotScheduleIAMBinding) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SnapshotScheduleIAMBinding. +func (mg *SnapshotScheduleIAMBinding) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SnapshotScheduleIAMBinding. +func (mg *SnapshotScheduleIAMBinding) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SnapshotScheduleIAMBinding. +func (mg *SnapshotScheduleIAMBinding) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SnapshotScheduleIAMBinding. +func (mg *SnapshotScheduleIAMBinding) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SnapshotScheduleIAMBinding. +func (mg *SnapshotScheduleIAMBinding) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SnapshotScheduleIAMBinding. +func (mg *SnapshotScheduleIAMBinding) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SnapshotScheduleIAMBinding. +func (mg *SnapshotScheduleIAMBinding) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SnapshotScheduleIAMBinding. +func (mg *SnapshotScheduleIAMBinding) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SnapshotScheduleIAMBinding. +func (mg *SnapshotScheduleIAMBinding) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SnapshotScheduleIAMBinding. 
+func (mg *SnapshotScheduleIAMBinding) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SnapshotScheduleIAMBinding. +func (mg *SnapshotScheduleIAMBinding) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/compute/v1alpha1/zz_generated.managedlist.go b/apis/compute/v1alpha1/zz_generated.managedlist.go index f5503fd..aa67baa 100644 --- a/apis/compute/v1alpha1/zz_generated.managedlist.go +++ b/apis/compute/v1alpha1/zz_generated.managedlist.go @@ -4,6 +4,15 @@ package v1alpha1 import resource "github.com/crossplane/crossplane-runtime/pkg/resource" +// GetItems of this DiskIAMBindingList. +func (l *DiskIAMBindingList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + // GetItems of this DiskList. func (l *DiskList) GetItems() []resource.Managed { items := make([]resource.Managed, len(l.Items)) @@ -13,6 +22,15 @@ func (l *DiskList) GetItems() []resource.Managed { return items } +// GetItems of this DiskPlacementGroupIAMBindingList. +func (l *DiskPlacementGroupIAMBindingList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + // GetItems of this DiskPlacementGroupList. func (l *DiskPlacementGroupList) GetItems() []resource.Managed { items := make([]resource.Managed, len(l.Items)) @@ -22,6 +40,15 @@ func (l *DiskPlacementGroupList) GetItems() []resource.Managed { return items } +// GetItems of this FilesystemIAMBindingList. +func (l *FilesystemIAMBindingList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + // GetItems of this FilesystemList. 
func (l *FilesystemList) GetItems() []resource.Managed { items := make([]resource.Managed, len(l.Items)) @@ -31,6 +58,15 @@ func (l *FilesystemList) GetItems() []resource.Managed { return items } +// GetItems of this GpuClusterIAMBindingList. +func (l *GpuClusterIAMBindingList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + // GetItems of this GpuClusterList. func (l *GpuClusterList) GetItems() []resource.Managed { items := make([]resource.Managed, len(l.Items)) @@ -40,6 +76,15 @@ func (l *GpuClusterList) GetItems() []resource.Managed { return items } +// GetItems of this ImageIAMBindingList. +func (l *ImageIAMBindingList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + // GetItems of this ImageList. func (l *ImageList) GetItems() []resource.Managed { items := make([]resource.Managed, len(l.Items)) @@ -58,6 +103,15 @@ func (l *InstanceGroupList) GetItems() []resource.Managed { return items } +// GetItems of this InstanceIAMBindingList. +func (l *InstanceIAMBindingList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + // GetItems of this InstanceList. func (l *InstanceList) GetItems() []resource.Managed { items := make([]resource.Managed, len(l.Items)) @@ -67,6 +121,15 @@ func (l *InstanceList) GetItems() []resource.Managed { return items } +// GetItems of this PlacementGroupIAMBindingList. +func (l *PlacementGroupIAMBindingList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + // GetItems of this PlacementGroupList. 
func (l *PlacementGroupList) GetItems() []resource.Managed { items := make([]resource.Managed, len(l.Items)) @@ -76,6 +139,15 @@ func (l *PlacementGroupList) GetItems() []resource.Managed { return items } +// GetItems of this SnapshotIAMBindingList. +func (l *SnapshotIAMBindingList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + // GetItems of this SnapshotList. func (l *SnapshotList) GetItems() []resource.Managed { items := make([]resource.Managed, len(l.Items)) @@ -85,6 +157,15 @@ func (l *SnapshotList) GetItems() []resource.Managed { return items } +// GetItems of this SnapshotScheduleIAMBindingList. +func (l *SnapshotScheduleIAMBindingList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + // GetItems of this SnapshotScheduleList. func (l *SnapshotScheduleList) GetItems() []resource.Managed { items := make([]resource.Managed, len(l.Items)) diff --git a/apis/compute/v1alpha1/zz_gpuclusteriambinding_terraformed.go b/apis/compute/v1alpha1/zz_gpuclusteriambinding_terraformed.go new file mode 100755 index 0000000..939fd80 --- /dev/null +++ b/apis/compute/v1alpha1/zz_gpuclusteriambinding_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this GpuClusterIAMBinding +func (mg *GpuClusterIAMBinding) GetTerraformResourceType() string { + return "yandex_compute_gpu_cluster_iam_binding" +} + +// GetConnectionDetailsMapping for this GpuClusterIAMBinding +func (tr *GpuClusterIAMBinding) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this GpuClusterIAMBinding +func (tr *GpuClusterIAMBinding) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this GpuClusterIAMBinding +func (tr *GpuClusterIAMBinding) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this GpuClusterIAMBinding +func (tr *GpuClusterIAMBinding) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this GpuClusterIAMBinding +func (tr *GpuClusterIAMBinding) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this GpuClusterIAMBinding +func (tr *GpuClusterIAMBinding) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this GpuClusterIAMBinding +func (tr *GpuClusterIAMBinding) 
GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this GpuClusterIAMBinding +func (tr *GpuClusterIAMBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this GpuClusterIAMBinding using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *GpuClusterIAMBinding) LateInitialize(attrs []byte) (bool, error) { + params := &GpuClusterIAMBindingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *GpuClusterIAMBinding) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/compute/v1alpha1/zz_gpuclusteriambinding_types.go b/apis/compute/v1alpha1/zz_gpuclusteriambinding_types.go new file mode 100755 index 0000000..84d7905 --- /dev/null +++ b/apis/compute/v1alpha1/zz_gpuclusteriambinding_types.go @@ -0,0 +1,124 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type GpuClusterIAMBindingInitParameters struct { + + // ID of the gpu cluster to attach the policy to. + GpuClusterID *string `json:"gpuClusterId,omitempty" tf:"gpu_cluster_id,omitempty"` + + // An array of identities that will be granted the privilege in the role. + // Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one + // yandex_compute_gpu_cluster_iam_binding can be used per role. + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +type GpuClusterIAMBindingObservation struct { + + // ID of the gpu cluster to attach the policy to. + GpuClusterID *string `json:"gpuClusterId,omitempty" tf:"gpu_cluster_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An array of identities that will be granted the privilege in the role. + // Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one + // yandex_compute_gpu_cluster_iam_binding can be used per role. 
+ Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +type GpuClusterIAMBindingParameters struct { + + // ID of the gpu cluster to attach the policy to. + // +kubebuilder:validation:Optional + GpuClusterID *string `json:"gpuClusterId,omitempty" tf:"gpu_cluster_id,omitempty"` + + // An array of identities that will be granted the privilege in the role. + // Each entry can have one of the following values: + // +kubebuilder:validation:Optional + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one + // yandex_compute_gpu_cluster_iam_binding can be used per role. + // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +// GpuClusterIAMBindingSpec defines the desired state of GpuClusterIAMBinding +type GpuClusterIAMBindingSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider GpuClusterIAMBindingParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider GpuClusterIAMBindingInitParameters `json:"initProvider,omitempty"` +} + +// GpuClusterIAMBindingStatus defines the observed state of GpuClusterIAMBinding. 
+type GpuClusterIAMBindingStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider GpuClusterIAMBindingObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// GpuClusterIAMBinding is the Schema for the GpuClusterIAMBindings API. Allows management of a single IAM binding for a GPU Cluster. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type GpuClusterIAMBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.gpuClusterId) || (has(self.initProvider) && has(self.initProvider.gpuClusterId))",message="spec.forProvider.gpuClusterId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.members) || (has(self.initProvider) && has(self.initProvider.members))",message="spec.forProvider.members is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" + 
Spec GpuClusterIAMBindingSpec `json:"spec"` + Status GpuClusterIAMBindingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// GpuClusterIAMBindingList contains a list of GpuClusterIAMBindings +type GpuClusterIAMBindingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []GpuClusterIAMBinding `json:"items"` +} + +// Repository type metadata. +var ( + GpuClusterIAMBinding_Kind = "GpuClusterIAMBinding" + GpuClusterIAMBinding_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: GpuClusterIAMBinding_Kind}.String() + GpuClusterIAMBinding_KindAPIVersion = GpuClusterIAMBinding_Kind + "." + CRDGroupVersion.String() + GpuClusterIAMBinding_GroupVersionKind = CRDGroupVersion.WithKind(GpuClusterIAMBinding_Kind) +) + +func init() { + SchemeBuilder.Register(&GpuClusterIAMBinding{}, &GpuClusterIAMBindingList{}) +} diff --git a/apis/compute/v1alpha1/zz_imageiambinding_terraformed.go b/apis/compute/v1alpha1/zz_imageiambinding_terraformed.go new file mode 100755 index 0000000..e13b95e --- /dev/null +++ b/apis/compute/v1alpha1/zz_imageiambinding_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ImageIAMBinding +func (mg *ImageIAMBinding) GetTerraformResourceType() string { + return "yandex_compute_image_iam_binding" +} + +// GetConnectionDetailsMapping for this ImageIAMBinding +func (tr *ImageIAMBinding) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ImageIAMBinding +func (tr *ImageIAMBinding) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ImageIAMBinding +func (tr *ImageIAMBinding) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ImageIAMBinding +func (tr *ImageIAMBinding) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ImageIAMBinding +func (tr *ImageIAMBinding) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ImageIAMBinding +func (tr *ImageIAMBinding) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ImageIAMBinding +func (tr *ImageIAMBinding) GetInitParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ImageIAMBinding +func (tr *ImageIAMBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ImageIAMBinding using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ImageIAMBinding) LateInitialize(attrs []byte) (bool, error) { + params := &ImageIAMBindingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ImageIAMBinding) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/compute/v1alpha1/zz_imageiambinding_types.go b/apis/compute/v1alpha1/zz_imageiambinding_types.go new file mode 100755 index 0000000..3868682 --- /dev/null +++ b/apis/compute/v1alpha1/zz_imageiambinding_types.go @@ -0,0 +1,123 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ImageIAMBindingInitParameters struct { + + // ID of the image to attach the policy to. + ImageID *string `json:"imageId,omitempty" tf:"image_id,omitempty"` + + // An array of identities that will be granted the privilege in the role. + // Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one + // yandex_compute_image_iam_binding can be used per role. + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +type ImageIAMBindingObservation struct { + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // ID of the image to attach the policy to. + ImageID *string `json:"imageId,omitempty" tf:"image_id,omitempty"` + + // An array of identities that will be granted the privilege in the role. + // Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one + // yandex_compute_image_iam_binding can be used per role. + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +type ImageIAMBindingParameters struct { + + // ID of the image to attach the policy to. 
+ // +kubebuilder:validation:Optional + ImageID *string `json:"imageId,omitempty" tf:"image_id,omitempty"` + + // An array of identities that will be granted the privilege in the role. + // Each entry can have one of the following values: + // +kubebuilder:validation:Optional + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one + // yandex_compute_image_iam_binding can be used per role. + // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +// ImageIAMBindingSpec defines the desired state of ImageIAMBinding +type ImageIAMBindingSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ImageIAMBindingParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ImageIAMBindingInitParameters `json:"initProvider,omitempty"` +} + +// ImageIAMBindingStatus defines the observed state of ImageIAMBinding. +type ImageIAMBindingStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ImageIAMBindingObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// ImageIAMBinding is the Schema for the ImageIAMBindings API. Allows management of a single IAM binding for an image. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type ImageIAMBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.imageId) || (has(self.initProvider) && has(self.initProvider.imageId))",message="spec.forProvider.imageId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.members) || (has(self.initProvider) && has(self.initProvider.members))",message="spec.forProvider.members is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" + Spec ImageIAMBindingSpec `json:"spec"` + Status ImageIAMBindingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ImageIAMBindingList contains a list of ImageIAMBindings +type ImageIAMBindingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ImageIAMBinding `json:"items"` +} + +// Repository type metadata. 
+var ( + ImageIAMBinding_Kind = "ImageIAMBinding" + ImageIAMBinding_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ImageIAMBinding_Kind}.String() + ImageIAMBinding_KindAPIVersion = ImageIAMBinding_Kind + "." + CRDGroupVersion.String() + ImageIAMBinding_GroupVersionKind = CRDGroupVersion.WithKind(ImageIAMBinding_Kind) +) + +func init() { + SchemeBuilder.Register(&ImageIAMBinding{}, &ImageIAMBindingList{}) +} diff --git a/apis/compute/v1alpha1/zz_instanceiambinding_terraformed.go b/apis/compute/v1alpha1/zz_instanceiambinding_terraformed.go new file mode 100755 index 0000000..ade3eec --- /dev/null +++ b/apis/compute/v1alpha1/zz_instanceiambinding_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this InstanceIAMBinding +func (mg *InstanceIAMBinding) GetTerraformResourceType() string { + return "yandex_compute_instance_iam_binding" +} + +// GetConnectionDetailsMapping for this InstanceIAMBinding +func (tr *InstanceIAMBinding) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this InstanceIAMBinding +func (tr *InstanceIAMBinding) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this InstanceIAMBinding +func (tr *InstanceIAMBinding) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this InstanceIAMBinding +func (tr *InstanceIAMBinding) GetID() string { + if tr.Status.AtProvider.ID == nil 
{ + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this InstanceIAMBinding +func (tr *InstanceIAMBinding) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this InstanceIAMBinding +func (tr *InstanceIAMBinding) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this InstanceIAMBinding +func (tr *InstanceIAMBinding) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this InstanceIAMBinding +func (tr *InstanceIAMBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this InstanceIAMBinding using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *InstanceIAMBinding) LateInitialize(attrs []byte) (bool, error) { + params := &InstanceIAMBindingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *InstanceIAMBinding) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/compute/v1alpha1/zz_instanceiambinding_types.go b/apis/compute/v1alpha1/zz_instanceiambinding_types.go new file mode 100755 index 0000000..d915c30 --- /dev/null +++ b/apis/compute/v1alpha1/zz_instanceiambinding_types.go @@ -0,0 +1,123 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type InstanceIAMBindingInitParameters struct { + + // ID of the instance to attach the policy to. + InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` + + // An array of identities that will be granted the privilege in the role. 
+ // Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one + // yandex_compute_instance_iam_binding can be used per role. + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +type InstanceIAMBindingObservation struct { + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // ID of the instance to attach the policy to. + InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` + + // An array of identities that will be granted the privilege in the role. + // Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one + // yandex_compute_instance_iam_binding can be used per role. + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +type InstanceIAMBindingParameters struct { + + // ID of the instance to attach the policy to. + // +kubebuilder:validation:Optional + InstanceID *string `json:"instanceId,omitempty" tf:"instance_id,omitempty"` + + // An array of identities that will be granted the privilege in the role. + // Each entry can have one of the following values: + // +kubebuilder:validation:Optional + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one + // yandex_compute_instance_iam_binding can be used per role. + // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +// InstanceIAMBindingSpec defines the desired state of InstanceIAMBinding +type InstanceIAMBindingSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider InstanceIAMBindingParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider InstanceIAMBindingInitParameters `json:"initProvider,omitempty"` +} + +// InstanceIAMBindingStatus defines the observed state of InstanceIAMBinding. +type InstanceIAMBindingStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider InstanceIAMBindingObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// InstanceIAMBinding is the Schema for the InstanceIAMBindings API. Allows management of a single IAM binding for an instance. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type InstanceIAMBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.instanceId) || (has(self.initProvider) && has(self.initProvider.instanceId))",message="spec.forProvider.instanceId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.members) || (has(self.initProvider) && has(self.initProvider.members))",message="spec.forProvider.members is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" + Spec InstanceIAMBindingSpec `json:"spec"` + Status InstanceIAMBindingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// InstanceIAMBindingList contains a list of InstanceIAMBindings +type InstanceIAMBindingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []InstanceIAMBinding `json:"items"` +} + +// Repository type metadata. 
+var ( + InstanceIAMBinding_Kind = "InstanceIAMBinding" + InstanceIAMBinding_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: InstanceIAMBinding_Kind}.String() + InstanceIAMBinding_KindAPIVersion = InstanceIAMBinding_Kind + "." + CRDGroupVersion.String() + InstanceIAMBinding_GroupVersionKind = CRDGroupVersion.WithKind(InstanceIAMBinding_Kind) +) + +func init() { + SchemeBuilder.Register(&InstanceIAMBinding{}, &InstanceIAMBindingList{}) +} diff --git a/apis/compute/v1alpha1/zz_placementgroupiambinding_terraformed.go b/apis/compute/v1alpha1/zz_placementgroupiambinding_terraformed.go new file mode 100755 index 0000000..82bc109 --- /dev/null +++ b/apis/compute/v1alpha1/zz_placementgroupiambinding_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this PlacementGroupIAMBinding +func (mg *PlacementGroupIAMBinding) GetTerraformResourceType() string { + return "yandex_compute_placement_group_iam_binding" +} + +// GetConnectionDetailsMapping for this PlacementGroupIAMBinding +func (tr *PlacementGroupIAMBinding) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this PlacementGroupIAMBinding +func (tr *PlacementGroupIAMBinding) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this PlacementGroupIAMBinding +func (tr *PlacementGroupIAMBinding) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this 
PlacementGroupIAMBinding +func (tr *PlacementGroupIAMBinding) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this PlacementGroupIAMBinding +func (tr *PlacementGroupIAMBinding) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this PlacementGroupIAMBinding +func (tr *PlacementGroupIAMBinding) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this PlacementGroupIAMBinding +func (tr *PlacementGroupIAMBinding) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this PlacementGroupIAMBinding +func (tr *PlacementGroupIAMBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this PlacementGroupIAMBinding using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *PlacementGroupIAMBinding) LateInitialize(attrs []byte) (bool, error) { + params := &PlacementGroupIAMBindingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *PlacementGroupIAMBinding) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/compute/v1alpha1/zz_placementgroupiambinding_types.go b/apis/compute/v1alpha1/zz_placementgroupiambinding_types.go new file mode 100755 index 0000000..97a0edb --- /dev/null +++ b/apis/compute/v1alpha1/zz_placementgroupiambinding_types.go @@ -0,0 +1,123 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type PlacementGroupIAMBindingInitParameters struct { + + // An array of identities that will be granted the privilege in the role. + // Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // ID of the placement group to attach the policy to. 
+ PlacementGroupID *string `json:"placementGroupId,omitempty" tf:"placement_group_id,omitempty"` + + // The role that should be assigned. Only one + // yandex_compute_placement_group_iam_binding can be used per role. + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +type PlacementGroupIAMBindingObservation struct { + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An array of identities that will be granted the privilege in the role. + // Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // ID of the placement group to attach the policy to. + PlacementGroupID *string `json:"placementGroupId,omitempty" tf:"placement_group_id,omitempty"` + + // The role that should be assigned. Only one + // yandex_compute_placement_group_iam_binding can be used per role. + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +type PlacementGroupIAMBindingParameters struct { + + // An array of identities that will be granted the privilege in the role. + // Each entry can have one of the following values: + // +kubebuilder:validation:Optional + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // ID of the placement group to attach the policy to. + // +kubebuilder:validation:Optional + PlacementGroupID *string `json:"placementGroupId,omitempty" tf:"placement_group_id,omitempty"` + + // The role that should be assigned. Only one + // yandex_compute_placement_group_iam_binding can be used per role. + // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +// PlacementGroupIAMBindingSpec defines the desired state of PlacementGroupIAMBinding +type PlacementGroupIAMBindingSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider PlacementGroupIAMBindingParameters `json:"forProvider"` + // THIS IS A BETA FIELD. 
It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider PlacementGroupIAMBindingInitParameters `json:"initProvider,omitempty"` +} + +// PlacementGroupIAMBindingStatus defines the observed state of PlacementGroupIAMBinding. +type PlacementGroupIAMBindingStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider PlacementGroupIAMBindingObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// PlacementGroupIAMBinding is the Schema for the PlacementGroupIAMBindings API. Allows management of a single IAM binding for a Placement Group. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type PlacementGroupIAMBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.members) || (has(self.initProvider) && has(self.initProvider.members))",message="spec.forProvider.members is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.placementGroupId) || (has(self.initProvider) && has(self.initProvider.placementGroupId))",message="spec.forProvider.placementGroupId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" + Spec PlacementGroupIAMBindingSpec `json:"spec"` + Status PlacementGroupIAMBindingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// PlacementGroupIAMBindingList contains a list of PlacementGroupIAMBindings +type PlacementGroupIAMBindingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items 
[]PlacementGroupIAMBinding `json:"items"` +} + +// Repository type metadata. +var ( + PlacementGroupIAMBinding_Kind = "PlacementGroupIAMBinding" + PlacementGroupIAMBinding_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: PlacementGroupIAMBinding_Kind}.String() + PlacementGroupIAMBinding_KindAPIVersion = PlacementGroupIAMBinding_Kind + "." + CRDGroupVersion.String() + PlacementGroupIAMBinding_GroupVersionKind = CRDGroupVersion.WithKind(PlacementGroupIAMBinding_Kind) +) + +func init() { + SchemeBuilder.Register(&PlacementGroupIAMBinding{}, &PlacementGroupIAMBindingList{}) +} diff --git a/apis/compute/v1alpha1/zz_snapshotiambinding_terraformed.go b/apis/compute/v1alpha1/zz_snapshotiambinding_terraformed.go new file mode 100755 index 0000000..6bf6283 --- /dev/null +++ b/apis/compute/v1alpha1/zz_snapshotiambinding_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SnapshotIAMBinding +func (mg *SnapshotIAMBinding) GetTerraformResourceType() string { + return "yandex_compute_snapshot_iam_binding" +} + +// GetConnectionDetailsMapping for this SnapshotIAMBinding +func (tr *SnapshotIAMBinding) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SnapshotIAMBinding +func (tr *SnapshotIAMBinding) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SnapshotIAMBinding +func (tr *SnapshotIAMBinding) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} 
+ +// GetID returns ID of underlying Terraform resource of this SnapshotIAMBinding +func (tr *SnapshotIAMBinding) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SnapshotIAMBinding +func (tr *SnapshotIAMBinding) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SnapshotIAMBinding +func (tr *SnapshotIAMBinding) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SnapshotIAMBinding +func (tr *SnapshotIAMBinding) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this SnapshotIAMBinding +func (tr *SnapshotIAMBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SnapshotIAMBinding using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *SnapshotIAMBinding) LateInitialize(attrs []byte) (bool, error) { + params := &SnapshotIAMBindingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SnapshotIAMBinding) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/compute/v1alpha1/zz_snapshotiambinding_types.go b/apis/compute/v1alpha1/zz_snapshotiambinding_types.go new file mode 100755 index 0000000..e16c933 --- /dev/null +++ b/apis/compute/v1alpha1/zz_snapshotiambinding_types.go @@ -0,0 +1,123 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type SnapshotIAMBindingInitParameters struct { + + // An array of identities that will be granted the privilege in the role. + // Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one + // yandex_compute_snapshot_iam_binding can be used per role. 
+ Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // ID of the snapshot to attach the policy to. + SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"` +} + +type SnapshotIAMBindingObservation struct { + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An array of identities that will be granted the privilege in the role. + // Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one + // yandex_compute_snapshot_iam_binding can be used per role. + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // ID of the snapshot to attach the policy to. + SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"` +} + +type SnapshotIAMBindingParameters struct { + + // An array of identities that will be granted the privilege in the role. + // Each entry can have one of the following values: + // +kubebuilder:validation:Optional + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one + // yandex_compute_snapshot_iam_binding can be used per role. + // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // ID of the snapshot to attach the policy to. + // +kubebuilder:validation:Optional + SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"` +} + +// SnapshotIAMBindingSpec defines the desired state of SnapshotIAMBinding +type SnapshotIAMBindingSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SnapshotIAMBindingParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SnapshotIAMBindingInitParameters `json:"initProvider,omitempty"` +} + +// SnapshotIAMBindingStatus defines the observed state of SnapshotIAMBinding. +type SnapshotIAMBindingStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SnapshotIAMBindingObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// SnapshotIAMBinding is the Schema for the SnapshotIAMBindings API. Allows management of a single IAM binding for a Snapshot. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type SnapshotIAMBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.members) || (has(self.initProvider) && has(self.initProvider.members))",message="spec.forProvider.members is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in 
self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.snapshotId) || (has(self.initProvider) && has(self.initProvider.snapshotId))",message="spec.forProvider.snapshotId is a required parameter" + Spec SnapshotIAMBindingSpec `json:"spec"` + Status SnapshotIAMBindingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SnapshotIAMBindingList contains a list of SnapshotIAMBindings +type SnapshotIAMBindingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SnapshotIAMBinding `json:"items"` +} + +// Repository type metadata. +var ( + SnapshotIAMBinding_Kind = "SnapshotIAMBinding" + SnapshotIAMBinding_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SnapshotIAMBinding_Kind}.String() + SnapshotIAMBinding_KindAPIVersion = SnapshotIAMBinding_Kind + "." + CRDGroupVersion.String() + SnapshotIAMBinding_GroupVersionKind = CRDGroupVersion.WithKind(SnapshotIAMBinding_Kind) +) + +func init() { + SchemeBuilder.Register(&SnapshotIAMBinding{}, &SnapshotIAMBindingList{}) +} diff --git a/apis/compute/v1alpha1/zz_snapshotscheduleiambinding_terraformed.go b/apis/compute/v1alpha1/zz_snapshotscheduleiambinding_terraformed.go new file mode 100755 index 0000000..52d6c04 --- /dev/null +++ b/apis/compute/v1alpha1/zz_snapshotscheduleiambinding_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SnapshotScheduleIAMBinding +func (mg *SnapshotScheduleIAMBinding) GetTerraformResourceType() string { + return "yandex_compute_snapshot_schedule_iam_binding" +} + +// GetConnectionDetailsMapping for this SnapshotScheduleIAMBinding +func (tr *SnapshotScheduleIAMBinding) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SnapshotScheduleIAMBinding +func (tr *SnapshotScheduleIAMBinding) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SnapshotScheduleIAMBinding +func (tr *SnapshotScheduleIAMBinding) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SnapshotScheduleIAMBinding +func (tr *SnapshotScheduleIAMBinding) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SnapshotScheduleIAMBinding +func (tr *SnapshotScheduleIAMBinding) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SnapshotScheduleIAMBinding +func (tr *SnapshotScheduleIAMBinding) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} 
+ +// GetInitParameters of this SnapshotScheduleIAMBinding +func (tr *SnapshotScheduleIAMBinding) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this SnapshotScheduleIAMBinding +func (tr *SnapshotScheduleIAMBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SnapshotScheduleIAMBinding using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *SnapshotScheduleIAMBinding) LateInitialize(attrs []byte) (bool, error) { + params := &SnapshotScheduleIAMBindingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SnapshotScheduleIAMBinding) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/compute/v1alpha1/zz_snapshotscheduleiambinding_types.go b/apis/compute/v1alpha1/zz_snapshotscheduleiambinding_types.go new file mode 100755 index 0000000..a089a8d --- /dev/null +++ b/apis/compute/v1alpha1/zz_snapshotscheduleiambinding_types.go @@ -0,0 +1,123 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type SnapshotScheduleIAMBindingInitParameters struct { + + // An array of identities that will be granted the privilege in the role. + // Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one + // yandex_compute_snapshot_schedule_iam_binding can be used per role. + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // ID of the snapshot schedule to attach the policy to. 
+ SnapshotScheduleID *string `json:"snapshotScheduleId,omitempty" tf:"snapshot_schedule_id,omitempty"` +} + +type SnapshotScheduleIAMBindingObservation struct { + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An array of identities that will be granted the privilege in the role. + // Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one + // yandex_compute_snapshot_schedule_iam_binding can be used per role. + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // ID of the snapshot schedule to attach the policy to. + SnapshotScheduleID *string `json:"snapshotScheduleId,omitempty" tf:"snapshot_schedule_id,omitempty"` +} + +type SnapshotScheduleIAMBindingParameters struct { + + // An array of identities that will be granted the privilege in the role. + // Each entry can have one of the following values: + // +kubebuilder:validation:Optional + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be assigned. Only one + // yandex_compute_snapshot_schedule_iam_binding can be used per role. + // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // ID of the snapshot schedule to attach the policy to. + // +kubebuilder:validation:Optional + SnapshotScheduleID *string `json:"snapshotScheduleId,omitempty" tf:"snapshot_schedule_id,omitempty"` +} + +// SnapshotScheduleIAMBindingSpec defines the desired state of SnapshotScheduleIAMBinding +type SnapshotScheduleIAMBindingSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SnapshotScheduleIAMBindingParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SnapshotScheduleIAMBindingInitParameters `json:"initProvider,omitempty"` +} + +// SnapshotScheduleIAMBindingStatus defines the observed state of SnapshotScheduleIAMBinding. +type SnapshotScheduleIAMBindingStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SnapshotScheduleIAMBindingObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// SnapshotScheduleIAMBinding is the Schema for the SnapshotScheduleIAMBindings API. Allows management of a single IAM binding for a Snapshot Schedule. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type SnapshotScheduleIAMBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.members) || (has(self.initProvider) && has(self.initProvider.members))",message="spec.forProvider.members is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.snapshotScheduleId) || (has(self.initProvider) && has(self.initProvider.snapshotScheduleId))",message="spec.forProvider.snapshotScheduleId is a required parameter" + Spec SnapshotScheduleIAMBindingSpec `json:"spec"` + Status SnapshotScheduleIAMBindingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SnapshotScheduleIAMBindingList contains a list of SnapshotScheduleIAMBindings +type SnapshotScheduleIAMBindingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items 
[]SnapshotScheduleIAMBinding `json:"items"` +} + +// Repository type metadata. +var ( + SnapshotScheduleIAMBinding_Kind = "SnapshotScheduleIAMBinding" + SnapshotScheduleIAMBinding_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SnapshotScheduleIAMBinding_Kind}.String() + SnapshotScheduleIAMBinding_KindAPIVersion = SnapshotScheduleIAMBinding_Kind + "." + CRDGroupVersion.String() + SnapshotScheduleIAMBinding_GroupVersionKind = CRDGroupVersion.WithKind(SnapshotScheduleIAMBinding_Kind) +) + +func init() { + SchemeBuilder.Register(&SnapshotScheduleIAMBinding{}, &SnapshotScheduleIAMBindingList{}) +} diff --git a/apis/dataproc/v1alpha1/zz_cluster_terraformed.go b/apis/dataproc/v1alpha1/zz_cluster_terraformed.go new file mode 100755 index 0000000..3cbc045 --- /dev/null +++ b/apis/dataproc/v1alpha1/zz_cluster_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Cluster +func (mg *Cluster) GetTerraformResourceType() string { + return "yandex_dataproc_cluster" +} + +// GetConnectionDetailsMapping for this Cluster +func (tr *Cluster) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Cluster +func (tr *Cluster) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Cluster +func (tr *Cluster) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Cluster +func (tr *Cluster) GetID() string 
{ + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Cluster +func (tr *Cluster) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Cluster +func (tr *Cluster) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Cluster +func (tr *Cluster) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this Cluster +func (tr *Cluster) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Cluster using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Cluster) LateInitialize(attrs []byte) (bool, error) { + params := &ClusterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Cluster) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/dataproc/v1alpha1/zz_cluster_types.go b/apis/dataproc/v1alpha1/zz_cluster_types.go new file mode 100755 index 0000000..ff131e5 --- /dev/null +++ b/apis/dataproc/v1alpha1/zz_cluster_types.go @@ -0,0 +1,631 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AutoscalingConfigInitParameters struct { + + // Defines an autoscaling rule based on the average CPU utilization of the instance group. If not set default autoscaling metric will be used. + CPUUtilizationTarget *string `json:"cpuUtilizationTarget,omitempty" tf:"cpu_utilization_target,omitempty"` + + // Timeout to gracefully decommission nodes during downscaling. In seconds. 
+ DecommissionTimeout *string `json:"decommissionTimeout,omitempty" tf:"decommission_timeout,omitempty"` + + // Maximum number of nodes in autoscaling subclusters. + MaxHostsCount *float64 `json:"maxHostsCount,omitempty" tf:"max_hosts_count,omitempty"` + + // Time in seconds allotted for averaging metrics. + MeasurementDuration *string `json:"measurementDuration,omitempty" tf:"measurement_duration,omitempty"` + + // Bool flag -- whether to use preemptible compute instances. Preemptible instances are stopped at least once every 24 hours, and can be stopped at any time if their resources are needed by Compute. For more information, see Preemptible Virtual Machines. + Preemptible *bool `json:"preemptible,omitempty" tf:"preemptible,omitempty"` + + // Minimum amount of time in seconds allotted for monitoring before Instance Groups can reduce the number of instances in the group. During this time, the group size doesn't decrease, even if the new metric values indicate that it should. + StabilizationDuration *string `json:"stabilizationDuration,omitempty" tf:"stabilization_duration,omitempty"` + + // The warmup time of the instance in seconds. During this time, traffic is sent to the instance, but instance metrics are not collected. + WarmupDuration *string `json:"warmupDuration,omitempty" tf:"warmup_duration,omitempty"` +} + +type AutoscalingConfigObservation struct { + + // Defines an autoscaling rule based on the average CPU utilization of the instance group. If not set default autoscaling metric will be used. + CPUUtilizationTarget *string `json:"cpuUtilizationTarget,omitempty" tf:"cpu_utilization_target,omitempty"` + + // Timeout to gracefully decommission nodes during downscaling. In seconds. + DecommissionTimeout *string `json:"decommissionTimeout,omitempty" tf:"decommission_timeout,omitempty"` + + // Maximum number of nodes in autoscaling subclusters. 
+ MaxHostsCount *float64 `json:"maxHostsCount,omitempty" tf:"max_hosts_count,omitempty"` + + // Time in seconds allotted for averaging metrics. + MeasurementDuration *string `json:"measurementDuration,omitempty" tf:"measurement_duration,omitempty"` + + // Bool flag -- whether to use preemptible compute instances. Preemptible instances are stopped at least once every 24 hours, and can be stopped at any time if their resources are needed by Compute. For more information, see Preemptible Virtual Machines. + Preemptible *bool `json:"preemptible,omitempty" tf:"preemptible,omitempty"` + + // Minimum amount of time in seconds allotted for monitoring before Instance Groups can reduce the number of instances in the group. During this time, the group size doesn't decrease, even if the new metric values indicate that it should. + StabilizationDuration *string `json:"stabilizationDuration,omitempty" tf:"stabilization_duration,omitempty"` + + // The warmup time of the instance in seconds. During this time, traffic is sent to the instance, but instance metrics are not collected. + WarmupDuration *string `json:"warmupDuration,omitempty" tf:"warmup_duration,omitempty"` +} + +type AutoscalingConfigParameters struct { + + // Defines an autoscaling rule based on the average CPU utilization of the instance group. If not set default autoscaling metric will be used. + // +kubebuilder:validation:Optional + CPUUtilizationTarget *string `json:"cpuUtilizationTarget,omitempty" tf:"cpu_utilization_target,omitempty"` + + // Timeout to gracefully decommission nodes during downscaling. In seconds. + // +kubebuilder:validation:Optional + DecommissionTimeout *string `json:"decommissionTimeout,omitempty" tf:"decommission_timeout,omitempty"` + + // Maximum number of nodes in autoscaling subclusters. + // +kubebuilder:validation:Optional + MaxHostsCount *float64 `json:"maxHostsCount" tf:"max_hosts_count,omitempty"` + + // Time in seconds allotted for averaging metrics. 
+ // +kubebuilder:validation:Optional + MeasurementDuration *string `json:"measurementDuration,omitempty" tf:"measurement_duration,omitempty"` + + // Bool flag -- whether to use preemptible compute instances. Preemptible instances are stopped at least once every 24 hours, and can be stopped at any time if their resources are needed by Compute. For more information, see Preemptible Virtual Machines. + // +kubebuilder:validation:Optional + Preemptible *bool `json:"preemptible,omitempty" tf:"preemptible,omitempty"` + + // Minimum amount of time in seconds allotted for monitoring before Instance Groups can reduce the number of instances in the group. During this time, the group size doesn't decrease, even if the new metric values indicate that it should. + // +kubebuilder:validation:Optional + StabilizationDuration *string `json:"stabilizationDuration,omitempty" tf:"stabilization_duration,omitempty"` + + // The warmup time of the instance in seconds. During this time, traffic is sent to the instance, but instance metrics are not collected. + // +kubebuilder:validation:Optional + WarmupDuration *string `json:"warmupDuration,omitempty" tf:"warmup_duration,omitempty"` +} + +type ClusterConfigInitParameters struct { + + // Data Proc specific options. The structure is documented below. + Hadoop []HadoopInitParameters `json:"hadoop,omitempty" tf:"hadoop,omitempty"` + + // Configuration of the Data Proc subcluster. The structure is documented below. + SubclusterSpec []SubclusterSpecInitParameters `json:"subclusterSpec,omitempty" tf:"subcluster_spec,omitempty"` + + // Version of Data Proc image. + VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"` +} + +type ClusterConfigObservation struct { + + // Data Proc specific options. The structure is documented below. + Hadoop []HadoopObservation `json:"hadoop,omitempty" tf:"hadoop,omitempty"` + + // Configuration of the Data Proc subcluster. The structure is documented below. 
+ SubclusterSpec []SubclusterSpecObservation `json:"subclusterSpec,omitempty" tf:"subcluster_spec,omitempty"` + + // Version of Data Proc image. + VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"` +} + +type ClusterConfigParameters struct { + + // Data Proc specific options. The structure is documented below. + // +kubebuilder:validation:Optional + Hadoop []HadoopParameters `json:"hadoop,omitempty" tf:"hadoop,omitempty"` + + // Configuration of the Data Proc subcluster. The structure is documented below. + // +kubebuilder:validation:Optional + SubclusterSpec []SubclusterSpecParameters `json:"subclusterSpec" tf:"subcluster_spec,omitempty"` + + // Version of Data Proc image. + // +kubebuilder:validation:Optional + VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"` +} + +type ClusterInitParameters struct { + + // Name of the Object Storage bucket to use for Data Proc jobs. Data Proc Agent saves output of job driver's process to specified bucket. In order for this to work service account (specified by the service_account_id argument) should be given permission to create objects within this bucket. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/storage/v1alpha1.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("bucket",false) + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in storage to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in storage to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Configuration and resources for hosts that should be created with the cluster. The structure is documented below. 
+ ClusterConfig []ClusterConfigInitParameters `json:"clusterConfig,omitempty" tf:"cluster_config,omitempty"` + + // Inhibits deletion of the cluster. Can be either true or false. + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // Description of the Data Proc cluster. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // ID of the folder to create a cluster in. If it is not provided, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // A list of host group IDs to place VMs of the cluster on. + // +listType=set + HostGroupIds []*string `json:"hostGroupIds,omitempty" tf:"host_group_ids,omitempty"` + + // A set of key/value label pairs to assign to the Data Proc cluster. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Name of a specific Data Proc cluster. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A list of security group IDs that the cluster belongs to. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // Service account to be used by the Data Proc agent to access resources of Yandex.Cloud. Selected service account should have mdb.dataproc.agent role on the folder where the Data Proc cluster will be located. 
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + + // Reference to a ServiceAccount in iam to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"` + + // Selector for a ServiceAccount in iam to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"` + + // Whether to enable UI Proxy feature. + UIProxy *bool `json:"uiProxy,omitempty" tf:"ui_proxy,omitempty"` + + // ID of the availability zone to create cluster in. If it is not provided, the default provider zone is used. + ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"` +} + +type ClusterObservation struct { + + // Name of the Object Storage bucket to use for Data Proc jobs. Data Proc Agent saves output of job driver's process to specified bucket. In order for this to work service account (specified by the service_account_id argument) should be given permission to create objects within this bucket. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Configuration and resources for hosts that should be created with the cluster. The structure is documented below. + ClusterConfig []ClusterConfigObservation `json:"clusterConfig,omitempty" tf:"cluster_config,omitempty"` + + // (Computed) The Data Proc cluster creation timestamp. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // Inhibits deletion of the cluster. Can be either true or false. + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // Description of the Data Proc cluster. 
+ Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // ID of the folder to create a cluster in. If it is not provided, the default provider folder is used. + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // A list of host group IDs to place VMs of the cluster on. + // +listType=set + HostGroupIds []*string `json:"hostGroupIds,omitempty" tf:"host_group_ids,omitempty"` + + // (Computed) ID of a new Data Proc cluster. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A set of key/value label pairs to assign to the Data Proc cluster. + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Name of a specific Data Proc cluster. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A list of security group IDs that the cluster belongs to. + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // Service account to be used by the Data Proc agent to access resources of Yandex.Cloud. Selected service account should have mdb.dataproc.agent role on the folder where the Data Proc cluster will be located. + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + + // Whether to enable UI Proxy feature. + UIProxy *bool `json:"uiProxy,omitempty" tf:"ui_proxy,omitempty"` + + // ID of the availability zone to create cluster in. If it is not provided, the default provider zone is used. + ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"` +} + +type ClusterParameters struct { + + // Name of the Object Storage bucket to use for Data Proc jobs. Data Proc Agent saves output of job driver's process to specified bucket. In order for this to work service account (specified by the service_account_id argument) should be given permission to create objects within this bucket. 
+ // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/storage/v1alpha1.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("bucket",false) + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Reference to a Bucket in storage to populate bucket. + // +kubebuilder:validation:Optional + BucketRef *v1.Reference `json:"bucketRef,omitempty" tf:"-"` + + // Selector for a Bucket in storage to populate bucket. + // +kubebuilder:validation:Optional + BucketSelector *v1.Selector `json:"bucketSelector,omitempty" tf:"-"` + + // Configuration and resources for hosts that should be created with the cluster. The structure is documented below. + // +kubebuilder:validation:Optional + ClusterConfig []ClusterConfigParameters `json:"clusterConfig,omitempty" tf:"cluster_config,omitempty"` + + // Inhibits deletion of the cluster. Can be either true or false. + // +kubebuilder:validation:Optional + DeletionProtection *bool `json:"deletionProtection,omitempty" tf:"deletion_protection,omitempty"` + + // Description of the Data Proc cluster. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // ID of the folder to create a cluster in. If it is not provided, the default provider folder is used. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. 
+ // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // A list of host group IDs to place VMs of the cluster on. + // +kubebuilder:validation:Optional + // +listType=set + HostGroupIds []*string `json:"hostGroupIds,omitempty" tf:"host_group_ids,omitempty"` + + // A set of key/value label pairs to assign to the Data Proc cluster. + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Name of a specific Data Proc cluster. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A list of security group IDs that the cluster belongs to. + // +kubebuilder:validation:Optional + // +listType=set + SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"` + + // Service account to be used by the Data Proc agent to access resources of Yandex.Cloud. Selected service account should have mdb.dataproc.agent role on the folder where the Data Proc cluster will be located. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1.ServiceAccount + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + + // Reference to a ServiceAccount in iam to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDRef *v1.Reference `json:"serviceAccountIdRef,omitempty" tf:"-"` + + // Selector for a ServiceAccount in iam to populate serviceAccountId. + // +kubebuilder:validation:Optional + ServiceAccountIDSelector *v1.Selector `json:"serviceAccountIdSelector,omitempty" tf:"-"` + + // Whether to enable UI Proxy feature. 
+ // +kubebuilder:validation:Optional + UIProxy *bool `json:"uiProxy,omitempty" tf:"ui_proxy,omitempty"` + + // ID of the availability zone to create cluster in. If it is not provided, the default provider zone is used. + // +kubebuilder:validation:Optional + ZoneID *string `json:"zoneId,omitempty" tf:"zone_id,omitempty"` +} + +type HadoopInitParameters struct { + + // List of initialization scripts. The structure is documented below. + InitializationAction []InitializationActionInitParameters `json:"initializationAction,omitempty" tf:"initialization_action,omitempty"` + + // A set of key/value pairs that are used to configure cluster services. + // +mapType=granular + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` + + // List of SSH public keys to put to the hosts of the cluster. For information on how to connect to the cluster, see the official documentation. + // +listType=set + SSHPublicKeys []*string `json:"sshPublicKeys,omitempty" tf:"ssh_public_keys,omitempty"` + + // List of services to run on Data Proc cluster. + // +listType=set + Services []*string `json:"services,omitempty" tf:"services,omitempty"` +} + +type HadoopObservation struct { + + // List of initialization scripts. The structure is documented below. + InitializationAction []InitializationActionObservation `json:"initializationAction,omitempty" tf:"initialization_action,omitempty"` + + // A set of key/value pairs that are used to configure cluster services. + // +mapType=granular + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` + + // List of SSH public keys to put to the hosts of the cluster. For information on how to connect to the cluster, see the official documentation. + // +listType=set + SSHPublicKeys []*string `json:"sshPublicKeys,omitempty" tf:"ssh_public_keys,omitempty"` + + // List of services to run on Data Proc cluster. 
+ // +listType=set + Services []*string `json:"services,omitempty" tf:"services,omitempty"` +} + +type HadoopParameters struct { + + // List of initialization scripts. The structure is documented below. + // +kubebuilder:validation:Optional + InitializationAction []InitializationActionParameters `json:"initializationAction,omitempty" tf:"initialization_action,omitempty"` + + // A set of key/value pairs that are used to configure cluster services. + // +kubebuilder:validation:Optional + // +mapType=granular + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` + + // List of SSH public keys to put to the hosts of the cluster. For information on how to connect to the cluster, see the official documentation. + // +kubebuilder:validation:Optional + // +listType=set + SSHPublicKeys []*string `json:"sshPublicKeys,omitempty" tf:"ssh_public_keys,omitempty"` + + // List of services to run on Data Proc cluster. + // +kubebuilder:validation:Optional + // +listType=set + Services []*string `json:"services,omitempty" tf:"services,omitempty"` +} + +type InitializationActionInitParameters struct { + + // List of arguments of the initialization script. + Args []*string `json:"args,omitempty" tf:"args,omitempty"` + + // Script execution timeout, in seconds. + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // Script URI. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type InitializationActionObservation struct { + + // List of arguments of the initialization script. + Args []*string `json:"args,omitempty" tf:"args,omitempty"` + + // Script execution timeout, in seconds. + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // Script URI. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type InitializationActionParameters struct { + + // List of arguments of the initialization script. 
+ // +kubebuilder:validation:Optional + Args []*string `json:"args,omitempty" tf:"args,omitempty"` + + // Script execution timeout, in seconds. + // +kubebuilder:validation:Optional + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // Script URI. + // +kubebuilder:validation:Optional + URI *string `json:"uri" tf:"uri,omitempty"` +} + +type ResourcesInitParameters struct { + + // Volume of the storage available to a host, in gigabytes. + DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` + + // Type of the storage of a host. One of network-hdd (default) or network-ssd. + DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` + + // The ID of the preset for computational resources available to a host. All available presets are listed in the documentation. + ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` +} + +type ResourcesObservation struct { + + // Volume of the storage available to a host, in gigabytes. + DiskSize *float64 `json:"diskSize,omitempty" tf:"disk_size,omitempty"` + + // Type of the storage of a host. One of network-hdd (default) or network-ssd. + DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` + + // The ID of the preset for computational resources available to a host. All available presets are listed in the documentation. + ResourcePresetID *string `json:"resourcePresetId,omitempty" tf:"resource_preset_id,omitempty"` +} + +type ResourcesParameters struct { + + // Volume of the storage available to a host, in gigabytes. + // +kubebuilder:validation:Optional + DiskSize *float64 `json:"diskSize" tf:"disk_size,omitempty"` + + // Type of the storage of a host. One of network-hdd (default) or network-ssd. + // +kubebuilder:validation:Optional + DiskTypeID *string `json:"diskTypeId,omitempty" tf:"disk_type_id,omitempty"` + + // The ID of the preset for computational resources available to a host. 
All available presets are listed in the documentation. + // +kubebuilder:validation:Optional + ResourcePresetID *string `json:"resourcePresetId" tf:"resource_preset_id,omitempty"` +} + +type SubclusterSpecInitParameters struct { + + // If true then assign public IP addresses to the hosts of the subclusters. + AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` + + // Autoscaling configuration for compute subclusters. + AutoscalingConfig []AutoscalingConfigInitParameters `json:"autoscalingConfig,omitempty" tf:"autoscaling_config,omitempty"` + + // Number of hosts within Data Proc subcluster. + HostsCount *float64 `json:"hostsCount,omitempty" tf:"hosts_count,omitempty"` + + // Name of the Data Proc subcluster. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Resources allocated to each host of the Data Proc subcluster. The structure is documented below. + Resources []ResourcesInitParameters `json:"resources,omitempty" tf:"resources,omitempty"` + + // Role of the subcluster in the Data Proc cluster. + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // The ID of the subnet, to which hosts of the subcluster belong. Subnets of all the subclusters must belong to the same VPC network. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in vpc to populate subnetId. 
+ // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type SubclusterSpecObservation struct { + + // If true then assign public IP addresses to the hosts of the subclusters. + AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` + + // Autoscaling configuration for compute subclusters. + AutoscalingConfig []AutoscalingConfigObservation `json:"autoscalingConfig,omitempty" tf:"autoscaling_config,omitempty"` + + // Number of hosts within Data Proc subcluster. + HostsCount *float64 `json:"hostsCount,omitempty" tf:"hosts_count,omitempty"` + + // (Computed) ID of a new Data Proc cluster. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Name of the Data Proc subcluster. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Resources allocated to each host of the Data Proc subcluster. The structure is documented below. + Resources []ResourcesObservation `json:"resources,omitempty" tf:"resources,omitempty"` + + // Role of the subcluster in the Data Proc cluster. + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // The ID of the subnet, to which hosts of the subcluster belong. Subnets of all the subclusters must belong to the same VPC network. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` +} + +type SubclusterSpecParameters struct { + + // If true then assign public IP addresses to the hosts of the subclusters. + // +kubebuilder:validation:Optional + AssignPublicIP *bool `json:"assignPublicIp,omitempty" tf:"assign_public_ip,omitempty"` + + // Autoscaling configuration for compute subclusters. + // +kubebuilder:validation:Optional + AutoscalingConfig []AutoscalingConfigParameters `json:"autoscalingConfig,omitempty" tf:"autoscaling_config,omitempty"` + + // Number of hosts within Data Proc subcluster. 
+ // +kubebuilder:validation:Optional + HostsCount *float64 `json:"hostsCount" tf:"hosts_count,omitempty"` + + // Name of the Data Proc subcluster. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Resources allocated to each host of the Data Proc subcluster. The structure is documented below. + // +kubebuilder:validation:Optional + Resources []ResourcesParameters `json:"resources" tf:"resources,omitempty"` + + // Role of the subcluster in the Data Proc cluster. + // +kubebuilder:validation:Optional + Role *string `json:"role" tf:"role,omitempty"` + + // The ID of the subnet, to which hosts of the subcluster belong. Subnets of all the subclusters must belong to the same VPC network. + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in vpc to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +// ClusterSpec defines the desired state of Cluster +type ClusterSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ClusterParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. 
+ // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ClusterInitParameters `json:"initProvider,omitempty"` +} + +// ClusterStatus defines the observed state of Cluster. +type ClusterStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ClusterObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// Cluster is the Schema for the Clusters API. Manages a Data Proc cluster within Yandex.Cloud. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type Cluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.clusterConfig) || (has(self.initProvider) && has(self.initProvider.clusterConfig))",message="spec.forProvider.clusterConfig is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && 
has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec ClusterSpec `json:"spec"` + Status ClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ClusterList contains a list of Clusters +type ClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Cluster `json:"items"` +} + +// Repository type metadata. +var ( + Cluster_Kind = "Cluster" + Cluster_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Cluster_Kind}.String() + Cluster_KindAPIVersion = Cluster_Kind + "." + CRDGroupVersion.String() + Cluster_GroupVersionKind = CRDGroupVersion.WithKind(Cluster_Kind) +) + +func init() { + SchemeBuilder.Register(&Cluster{}, &ClusterList{}) +} diff --git a/apis/dataproc/v1alpha1/zz_generated.conversion_hubs.go b/apis/dataproc/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..aecfbdf --- /dev/null +++ b/apis/dataproc/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,6 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +// Hub marks this type as a conversion hub. +func (tr *Cluster) Hub() {} diff --git a/apis/dataproc/v1alpha1/zz_generated.deepcopy.go b/apis/dataproc/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..b0b9dd1 --- /dev/null +++ b/apis/dataproc/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,1291 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoscalingConfigInitParameters) DeepCopyInto(out *AutoscalingConfigInitParameters) { + *out = *in + if in.CPUUtilizationTarget != nil { + in, out := &in.CPUUtilizationTarget, &out.CPUUtilizationTarget + *out = new(string) + **out = **in + } + if in.DecommissionTimeout != nil { + in, out := &in.DecommissionTimeout, &out.DecommissionTimeout + *out = new(string) + **out = **in + } + if in.MaxHostsCount != nil { + in, out := &in.MaxHostsCount, &out.MaxHostsCount + *out = new(float64) + **out = **in + } + if in.MeasurementDuration != nil { + in, out := &in.MeasurementDuration, &out.MeasurementDuration + *out = new(string) + **out = **in + } + if in.Preemptible != nil { + in, out := &in.Preemptible, &out.Preemptible + *out = new(bool) + **out = **in + } + if in.StabilizationDuration != nil { + in, out := &in.StabilizationDuration, &out.StabilizationDuration + *out = new(string) + **out = **in + } + if in.WarmupDuration != nil { + in, out := &in.WarmupDuration, &out.WarmupDuration + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingConfigInitParameters. +func (in *AutoscalingConfigInitParameters) DeepCopy() *AutoscalingConfigInitParameters { + if in == nil { + return nil + } + out := new(AutoscalingConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoscalingConfigObservation) DeepCopyInto(out *AutoscalingConfigObservation) { + *out = *in + if in.CPUUtilizationTarget != nil { + in, out := &in.CPUUtilizationTarget, &out.CPUUtilizationTarget + *out = new(string) + **out = **in + } + if in.DecommissionTimeout != nil { + in, out := &in.DecommissionTimeout, &out.DecommissionTimeout + *out = new(string) + **out = **in + } + if in.MaxHostsCount != nil { + in, out := &in.MaxHostsCount, &out.MaxHostsCount + *out = new(float64) + **out = **in + } + if in.MeasurementDuration != nil { + in, out := &in.MeasurementDuration, &out.MeasurementDuration + *out = new(string) + **out = **in + } + if in.Preemptible != nil { + in, out := &in.Preemptible, &out.Preemptible + *out = new(bool) + **out = **in + } + if in.StabilizationDuration != nil { + in, out := &in.StabilizationDuration, &out.StabilizationDuration + *out = new(string) + **out = **in + } + if in.WarmupDuration != nil { + in, out := &in.WarmupDuration, &out.WarmupDuration + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingConfigObservation. +func (in *AutoscalingConfigObservation) DeepCopy() *AutoscalingConfigObservation { + if in == nil { + return nil + } + out := new(AutoscalingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoscalingConfigParameters) DeepCopyInto(out *AutoscalingConfigParameters) { + *out = *in + if in.CPUUtilizationTarget != nil { + in, out := &in.CPUUtilizationTarget, &out.CPUUtilizationTarget + *out = new(string) + **out = **in + } + if in.DecommissionTimeout != nil { + in, out := &in.DecommissionTimeout, &out.DecommissionTimeout + *out = new(string) + **out = **in + } + if in.MaxHostsCount != nil { + in, out := &in.MaxHostsCount, &out.MaxHostsCount + *out = new(float64) + **out = **in + } + if in.MeasurementDuration != nil { + in, out := &in.MeasurementDuration, &out.MeasurementDuration + *out = new(string) + **out = **in + } + if in.Preemptible != nil { + in, out := &in.Preemptible, &out.Preemptible + *out = new(bool) + **out = **in + } + if in.StabilizationDuration != nil { + in, out := &in.StabilizationDuration, &out.StabilizationDuration + *out = new(string) + **out = **in + } + if in.WarmupDuration != nil { + in, out := &in.WarmupDuration, &out.WarmupDuration + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingConfigParameters. +func (in *AutoscalingConfigParameters) DeepCopy() *AutoscalingConfigParameters { + if in == nil { + return nil + } + out := new(AutoscalingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. 
+func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Cluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterConfigInitParameters) DeepCopyInto(out *ClusterConfigInitParameters) { + *out = *in + if in.Hadoop != nil { + in, out := &in.Hadoop, &out.Hadoop + *out = make([]HadoopInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubclusterSpec != nil { + in, out := &in.SubclusterSpec, &out.SubclusterSpec + *out = make([]SubclusterSpecInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigInitParameters. +func (in *ClusterConfigInitParameters) DeepCopy() *ClusterConfigInitParameters { + if in == nil { + return nil + } + out := new(ClusterConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterConfigObservation) DeepCopyInto(out *ClusterConfigObservation) { + *out = *in + if in.Hadoop != nil { + in, out := &in.Hadoop, &out.Hadoop + *out = make([]HadoopObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubclusterSpec != nil { + in, out := &in.SubclusterSpec, &out.SubclusterSpec + *out = make([]SubclusterSpecObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigObservation. +func (in *ClusterConfigObservation) DeepCopy() *ClusterConfigObservation { + if in == nil { + return nil + } + out := new(ClusterConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterConfigParameters) DeepCopyInto(out *ClusterConfigParameters) { + *out = *in + if in.Hadoop != nil { + in, out := &in.Hadoop, &out.Hadoop + *out = make([]HadoopParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubclusterSpec != nil { + in, out := &in.SubclusterSpec, &out.SubclusterSpec + *out = make([]SubclusterSpecParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigParameters. +func (in *ClusterConfigParameters) DeepCopy() *ClusterConfigParameters { + if in == nil { + return nil + } + out := new(ClusterConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ClusterInitParameters) DeepCopyInto(out *ClusterInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ClusterConfig != nil { + in, out := &in.ClusterConfig, &out.ClusterConfig + *out = make([]ClusterConfigInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.HostGroupIds != nil { + in, out := &in.HostGroupIds, &out.HostGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if 
in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.ServiceAccountIDRef != nil { + in, out := &in.ServiceAccountIDRef, &out.ServiceAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountIDSelector != nil { + in, out := &in.ServiceAccountIDSelector, &out.ServiceAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.UIProxy != nil { + in, out := &in.UIProxy, &out.UIProxy + *out = new(bool) + **out = **in + } + if in.ZoneID != nil { + in, out := &in.ZoneID, &out.ZoneID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterInitParameters. +func (in *ClusterInitParameters) DeepCopy() *ClusterInitParameters { + if in == nil { + return nil + } + out := new(ClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterList) DeepCopyInto(out *ClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Cluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList. +func (in *ClusterList) DeepCopy() *ClusterList { + if in == nil { + return nil + } + out := new(ClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterObservation) DeepCopyInto(out *ClusterObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.ClusterConfig != nil { + in, out := &in.ClusterConfig, &out.ClusterConfig + *out = make([]ClusterConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.HostGroupIds != nil { + in, out := &in.HostGroupIds, &out.HostGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { 
+ if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.UIProxy != nil { + in, out := &in.UIProxy, &out.UIProxy + *out = new(bool) + **out = **in + } + if in.ZoneID != nil { + in, out := &in.ZoneID, &out.ZoneID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterObservation. +func (in *ClusterObservation) DeepCopy() *ClusterObservation { + if in == nil { + return nil + } + out := new(ClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterParameters) DeepCopyInto(out *ClusterParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.BucketRef != nil { + in, out := &in.BucketRef, &out.BucketRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.BucketSelector != nil { + in, out := &in.BucketSelector, &out.BucketSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ClusterConfig != nil { + in, out := &in.ClusterConfig, &out.ClusterConfig + *out = make([]ClusterConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeletionProtection != nil { + in, out := &in.DeletionProtection, &out.DeletionProtection + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if 
in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.HostGroupIds != nil { + in, out := &in.HostGroupIds, &out.HostGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.ServiceAccountIDRef != nil { + in, out := &in.ServiceAccountIDRef, &out.ServiceAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountIDSelector != nil { + in, out := &in.ServiceAccountIDSelector, &out.ServiceAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.UIProxy != nil { + in, out := &in.UIProxy, &out.UIProxy + *out = new(bool) + **out = **in + } + if in.ZoneID != nil { + in, out := &in.ZoneID, &out.ZoneID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterParameters. 
+func (in *ClusterParameters) DeepCopy() *ClusterParameters { + if in == nil { + return nil + } + out := new(ClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. +func (in *ClusterSpec) DeepCopy() *ClusterSpec { + if in == nil { + return nil + } + out := new(ClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. +func (in *ClusterStatus) DeepCopy() *ClusterStatus { + if in == nil { + return nil + } + out := new(ClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HadoopInitParameters) DeepCopyInto(out *HadoopInitParameters) { + *out = *in + if in.InitializationAction != nil { + in, out := &in.InitializationAction, &out.InitializationAction + *out = make([]InitializationActionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SSHPublicKeys != nil { + in, out := &in.SSHPublicKeys, &out.SSHPublicKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Services != nil { + in, out := &in.Services, &out.Services + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HadoopInitParameters. +func (in *HadoopInitParameters) DeepCopy() *HadoopInitParameters { + if in == nil { + return nil + } + out := new(HadoopInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HadoopObservation) DeepCopyInto(out *HadoopObservation) { + *out = *in + if in.InitializationAction != nil { + in, out := &in.InitializationAction, &out.InitializationAction + *out = make([]InitializationActionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SSHPublicKeys != nil { + in, out := &in.SSHPublicKeys, &out.SSHPublicKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Services != nil { + in, out := &in.Services, &out.Services + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HadoopObservation. +func (in *HadoopObservation) DeepCopy() *HadoopObservation { + if in == nil { + return nil + } + out := new(HadoopObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HadoopParameters) DeepCopyInto(out *HadoopParameters) { + *out = *in + if in.InitializationAction != nil { + in, out := &in.InitializationAction, &out.InitializationAction + *out = make([]InitializationActionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SSHPublicKeys != nil { + in, out := &in.SSHPublicKeys, &out.SSHPublicKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Services != nil { + in, out := &in.Services, &out.Services + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HadoopParameters. +func (in *HadoopParameters) DeepCopy() *HadoopParameters { + if in == nil { + return nil + } + out := new(HadoopParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InitializationActionInitParameters) DeepCopyInto(out *InitializationActionInitParameters) { + *out = *in + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitializationActionInitParameters. +func (in *InitializationActionInitParameters) DeepCopy() *InitializationActionInitParameters { + if in == nil { + return nil + } + out := new(InitializationActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InitializationActionObservation) DeepCopyInto(out *InitializationActionObservation) { + *out = *in + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitializationActionObservation. +func (in *InitializationActionObservation) DeepCopy() *InitializationActionObservation { + if in == nil { + return nil + } + out := new(InitializationActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InitializationActionParameters) DeepCopyInto(out *InitializationActionParameters) { + *out = *in + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitializationActionParameters. +func (in *InitializationActionParameters) DeepCopy() *InitializationActionParameters { + if in == nil { + return nil + } + out := new(InitializationActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcesInitParameters) DeepCopyInto(out *ResourcesInitParameters) { + *out = *in + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.DiskTypeID != nil { + in, out := &in.DiskTypeID, &out.DiskTypeID + *out = new(string) + **out = **in + } + if in.ResourcePresetID != nil { + in, out := &in.ResourcePresetID, &out.ResourcePresetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesInitParameters. +func (in *ResourcesInitParameters) DeepCopy() *ResourcesInitParameters { + if in == nil { + return nil + } + out := new(ResourcesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourcesObservation) DeepCopyInto(out *ResourcesObservation) { + *out = *in + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.DiskTypeID != nil { + in, out := &in.DiskTypeID, &out.DiskTypeID + *out = new(string) + **out = **in + } + if in.ResourcePresetID != nil { + in, out := &in.ResourcePresetID, &out.ResourcePresetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesObservation. +func (in *ResourcesObservation) DeepCopy() *ResourcesObservation { + if in == nil { + return nil + } + out := new(ResourcesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcesParameters) DeepCopyInto(out *ResourcesParameters) { + *out = *in + if in.DiskSize != nil { + in, out := &in.DiskSize, &out.DiskSize + *out = new(float64) + **out = **in + } + if in.DiskTypeID != nil { + in, out := &in.DiskTypeID, &out.DiskTypeID + *out = new(string) + **out = **in + } + if in.ResourcePresetID != nil { + in, out := &in.ResourcePresetID, &out.ResourcePresetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesParameters. +func (in *ResourcesParameters) DeepCopy() *ResourcesParameters { + if in == nil { + return nil + } + out := new(ResourcesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubclusterSpecInitParameters) DeepCopyInto(out *SubclusterSpecInitParameters) { + *out = *in + if in.AssignPublicIP != nil { + in, out := &in.AssignPublicIP, &out.AssignPublicIP + *out = new(bool) + **out = **in + } + if in.AutoscalingConfig != nil { + in, out := &in.AutoscalingConfig, &out.AutoscalingConfig + *out = make([]AutoscalingConfigInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HostsCount != nil { + in, out := &in.HostsCount, &out.HostsCount + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourcesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubclusterSpecInitParameters. +func (in *SubclusterSpecInitParameters) DeepCopy() *SubclusterSpecInitParameters { + if in == nil { + return nil + } + out := new(SubclusterSpecInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubclusterSpecObservation) DeepCopyInto(out *SubclusterSpecObservation) { + *out = *in + if in.AssignPublicIP != nil { + in, out := &in.AssignPublicIP, &out.AssignPublicIP + *out = new(bool) + **out = **in + } + if in.AutoscalingConfig != nil { + in, out := &in.AutoscalingConfig, &out.AutoscalingConfig + *out = make([]AutoscalingConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HostsCount != nil { + in, out := &in.HostsCount, &out.HostsCount + *out = new(float64) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourcesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubclusterSpecObservation. +func (in *SubclusterSpecObservation) DeepCopy() *SubclusterSpecObservation { + if in == nil { + return nil + } + out := new(SubclusterSpecObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubclusterSpecParameters) DeepCopyInto(out *SubclusterSpecParameters) { + *out = *in + if in.AssignPublicIP != nil { + in, out := &in.AssignPublicIP, &out.AssignPublicIP + *out = new(bool) + **out = **in + } + if in.AutoscalingConfig != nil { + in, out := &in.AutoscalingConfig, &out.AutoscalingConfig + *out = make([]AutoscalingConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HostsCount != nil { + in, out := &in.HostsCount, &out.HostsCount + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourcesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubclusterSpecParameters. +func (in *SubclusterSpecParameters) DeepCopy() *SubclusterSpecParameters { + if in == nil { + return nil + } + out := new(SubclusterSpecParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/dataproc/v1alpha1/zz_generated.managed.go b/apis/dataproc/v1alpha1/zz_generated.managed.go new file mode 100644 index 0000000..b694481 --- /dev/null +++ b/apis/dataproc/v1alpha1/zz_generated.managed.go @@ -0,0 +1,65 @@ +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Cluster. +func (mg *Cluster) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Cluster. +func (mg *Cluster) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Cluster. +func (mg *Cluster) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Cluster. +func (mg *Cluster) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Cluster. +func (mg *Cluster) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Cluster. +func (mg *Cluster) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Cluster. +func (mg *Cluster) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Cluster. +func (mg *Cluster) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Cluster. +func (mg *Cluster) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Cluster. +func (mg *Cluster) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Cluster. +func (mg *Cluster) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Cluster. 
+func (mg *Cluster) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/dataproc/v1alpha1/zz_generated.managedlist.go b/apis/dataproc/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 0000000..dc19f5e --- /dev/null +++ b/apis/dataproc/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,14 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ClusterList. +func (l *ClusterList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/dataproc/v1alpha1/zz_generated.resolvers.go b/apis/dataproc/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000..03f9705 --- /dev/null +++ b/apis/dataproc/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,161 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + v1alpha13 "github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1" + v1alpha12 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1" + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/storage/v1alpha1" + v1alpha11 "github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this Cluster. 
+func (mg *Cluster) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Bucket), + Extract: resource.ExtractParamPath("bucket", false), + Reference: mg.Spec.ForProvider.BucketRef, + Selector: mg.Spec.ForProvider.BucketSelector, + To: reference.To{ + List: &v1alpha1.BucketList{}, + Managed: &v1alpha1.Bucket{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Bucket") + } + mg.Spec.ForProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.BucketRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.ForProvider.ClusterConfig); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.ClusterConfig[i3].SubclusterSpec); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ClusterConfig[i3].SubclusterSpec[i4].SubnetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ClusterConfig[i3].SubclusterSpec[i4].SubnetIDRef, + Selector: mg.Spec.ForProvider.ClusterConfig[i3].SubclusterSpec[i4].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ClusterConfig[i3].SubclusterSpec[i4].SubnetID") + } + mg.Spec.ForProvider.ClusterConfig[i3].SubclusterSpec[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ClusterConfig[i3].SubclusterSpec[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: 
&v1alpha12.FolderList{}, + Managed: &v1alpha12.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServiceAccountID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ServiceAccountIDRef, + Selector: mg.Spec.ForProvider.ServiceAccountIDSelector, + To: reference.To{ + List: &v1alpha13.ServiceAccountList{}, + Managed: &v1alpha13.ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServiceAccountID") + } + mg.Spec.ForProvider.ServiceAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServiceAccountIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Bucket), + Extract: resource.ExtractParamPath("bucket", false), + Reference: mg.Spec.InitProvider.BucketRef, + Selector: mg.Spec.InitProvider.BucketSelector, + To: reference.To{ + List: &v1alpha1.BucketList{}, + Managed: &v1alpha1.Bucket{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Bucket") + } + mg.Spec.InitProvider.Bucket = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.BucketRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.ClusterConfig); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.ClusterConfig[i3].SubclusterSpec); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ClusterConfig[i3].SubclusterSpec[i4].SubnetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.ClusterConfig[i3].SubclusterSpec[i4].SubnetIDRef, + Selector: 
mg.Spec.InitProvider.ClusterConfig[i3].SubclusterSpec[i4].SubnetIDSelector, + To: reference.To{ + List: &v1alpha11.SubnetList{}, + Managed: &v1alpha11.Subnet{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ClusterConfig[i3].SubclusterSpec[i4].SubnetID") + } + mg.Spec.InitProvider.ClusterConfig[i3].SubclusterSpec[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ClusterConfig[i3].SubclusterSpec[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha12.FolderList{}, + Managed: &v1alpha12.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ServiceAccountID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.ServiceAccountIDRef, + Selector: mg.Spec.InitProvider.ServiceAccountIDSelector, + To: reference.To{ + List: &v1alpha13.ServiceAccountList{}, + Managed: &v1alpha13.ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ServiceAccountID") + } + mg.Spec.InitProvider.ServiceAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ServiceAccountIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/dataproc/v1alpha1/zz_groupversion_info.go b/apis/dataproc/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..b03faf4 --- /dev/null +++ b/apis/dataproc/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,28 @@ +// Code generated by 
upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=dataproc.yandex-cloud.upjet.crossplane.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "dataproc.yandex-cloud.upjet.crossplane.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/function/v1alpha1/zz_generated.conversion_hubs.go b/apis/function/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..2451a4a --- /dev/null +++ b/apis/function/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,12 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +// Hub marks this type as a conversion hub. +func (tr *IAMBinding) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *ScalingPolicy) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Trigger) Hub() {} diff --git a/apis/function/v1alpha1/zz_generated.deepcopy.go b/apis/function/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..b505208 --- /dev/null +++ b/apis/function/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,2546 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerInitParameters) DeepCopyInto(out *ContainerInitParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.RetryAttempts != nil { + in, out := &in.RetryAttempts, &out.RetryAttempts + *out = new(string) + **out = **in + } + if in.RetryInterval != nil { + in, out := &in.RetryInterval, &out.RetryInterval + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerInitParameters. +func (in *ContainerInitParameters) DeepCopy() *ContainerInitParameters { + if in == nil { + return nil + } + out := new(ContainerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerObservation) DeepCopyInto(out *ContainerObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.RetryAttempts != nil { + in, out := &in.RetryAttempts, &out.RetryAttempts + *out = new(string) + **out = **in + } + if in.RetryInterval != nil { + in, out := &in.RetryInterval, &out.RetryInterval + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerObservation. 
+func (in *ContainerObservation) DeepCopy() *ContainerObservation { + if in == nil { + return nil + } + out := new(ContainerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerParameters) DeepCopyInto(out *ContainerParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.RetryAttempts != nil { + in, out := &in.RetryAttempts, &out.RetryAttempts + *out = new(string) + **out = **in + } + if in.RetryInterval != nil { + in, out := &in.RetryInterval, &out.RetryInterval + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerParameters. +func (in *ContainerParameters) DeepCopy() *ContainerParameters { + if in == nil { + return nil + } + out := new(ContainerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerRegistryInitParameters) DeepCopyInto(out *ContainerRegistryInitParameters) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.CreateImage != nil { + in, out := &in.CreateImage, &out.CreateImage + *out = new(bool) + **out = **in + } + if in.CreateImageTag != nil { + in, out := &in.CreateImageTag, &out.CreateImageTag + *out = new(bool) + **out = **in + } + if in.DeleteImage != nil { + in, out := &in.DeleteImage, &out.DeleteImage + *out = new(bool) + **out = **in + } + if in.DeleteImageTag != nil { + in, out := &in.DeleteImageTag, &out.DeleteImageTag + *out = new(bool) + **out = **in + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.RegistryID != nil { + in, out := &in.RegistryID, &out.RegistryID + *out = new(string) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRegistryInitParameters. +func (in *ContainerRegistryInitParameters) DeepCopy() *ContainerRegistryInitParameters { + if in == nil { + return nil + } + out := new(ContainerRegistryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerRegistryObservation) DeepCopyInto(out *ContainerRegistryObservation) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.CreateImage != nil { + in, out := &in.CreateImage, &out.CreateImage + *out = new(bool) + **out = **in + } + if in.CreateImageTag != nil { + in, out := &in.CreateImageTag, &out.CreateImageTag + *out = new(bool) + **out = **in + } + if in.DeleteImage != nil { + in, out := &in.DeleteImage, &out.DeleteImage + *out = new(bool) + **out = **in + } + if in.DeleteImageTag != nil { + in, out := &in.DeleteImageTag, &out.DeleteImageTag + *out = new(bool) + **out = **in + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.RegistryID != nil { + in, out := &in.RegistryID, &out.RegistryID + *out = new(string) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRegistryObservation. +func (in *ContainerRegistryObservation) DeepCopy() *ContainerRegistryObservation { + if in == nil { + return nil + } + out := new(ContainerRegistryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerRegistryParameters) DeepCopyInto(out *ContainerRegistryParameters) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.CreateImage != nil { + in, out := &in.CreateImage, &out.CreateImage + *out = new(bool) + **out = **in + } + if in.CreateImageTag != nil { + in, out := &in.CreateImageTag, &out.CreateImageTag + *out = new(bool) + **out = **in + } + if in.DeleteImage != nil { + in, out := &in.DeleteImage, &out.DeleteImage + *out = new(bool) + **out = **in + } + if in.DeleteImageTag != nil { + in, out := &in.DeleteImageTag, &out.DeleteImageTag + *out = new(bool) + **out = **in + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.RegistryID != nil { + in, out := &in.RegistryID, &out.RegistryID + *out = new(string) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRegistryParameters. +func (in *ContainerRegistryParameters) DeepCopy() *ContainerRegistryParameters { + if in == nil { + return nil + } + out := new(ContainerRegistryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataStreamsInitParameters) DeepCopyInto(out *DataStreamsInitParameters) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.StreamName != nil { + in, out := &in.StreamName, &out.StreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataStreamsInitParameters. +func (in *DataStreamsInitParameters) DeepCopy() *DataStreamsInitParameters { + if in == nil { + return nil + } + out := new(DataStreamsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataStreamsObservation) DeepCopyInto(out *DataStreamsObservation) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.StreamName != nil { + in, out := &in.StreamName, &out.StreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataStreamsObservation. 
+func (in *DataStreamsObservation) DeepCopy() *DataStreamsObservation { + if in == nil { + return nil + } + out := new(DataStreamsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataStreamsParameters) DeepCopyInto(out *DataStreamsParameters) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.StreamName != nil { + in, out := &in.StreamName, &out.StreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataStreamsParameters. +func (in *DataStreamsParameters) DeepCopy() *DataStreamsParameters { + if in == nil { + return nil + } + out := new(DataStreamsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DlqInitParameters) DeepCopyInto(out *DlqInitParameters) { + *out = *in + if in.QueueID != nil { + in, out := &in.QueueID, &out.QueueID + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DlqInitParameters. 
+func (in *DlqInitParameters) DeepCopy() *DlqInitParameters { + if in == nil { + return nil + } + out := new(DlqInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DlqObservation) DeepCopyInto(out *DlqObservation) { + *out = *in + if in.QueueID != nil { + in, out := &in.QueueID, &out.QueueID + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DlqObservation. +func (in *DlqObservation) DeepCopy() *DlqObservation { + if in == nil { + return nil + } + out := new(DlqObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DlqParameters) DeepCopyInto(out *DlqParameters) { + *out = *in + if in.QueueID != nil { + in, out := &in.QueueID, &out.QueueID + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DlqParameters. +func (in *DlqParameters) DeepCopy() *DlqParameters { + if in == nil { + return nil + } + out := new(DlqParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FunctionInitParameters) DeepCopyInto(out *FunctionInitParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.RetryAttempts != nil { + in, out := &in.RetryAttempts, &out.RetryAttempts + *out = new(string) + **out = **in + } + if in.RetryInterval != nil { + in, out := &in.RetryInterval, &out.RetryInterval + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionInitParameters. +func (in *FunctionInitParameters) DeepCopy() *FunctionInitParameters { + if in == nil { + return nil + } + out := new(FunctionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionObservation) DeepCopyInto(out *FunctionObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.RetryAttempts != nil { + in, out := &in.RetryAttempts, &out.RetryAttempts + *out = new(string) + **out = **in + } + if in.RetryInterval != nil { + in, out := &in.RetryInterval, &out.RetryInterval + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionObservation. 
+func (in *FunctionObservation) DeepCopy() *FunctionObservation { + if in == nil { + return nil + } + out := new(FunctionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionParameters) DeepCopyInto(out *FunctionParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.RetryAttempts != nil { + in, out := &in.RetryAttempts, &out.RetryAttempts + *out = new(string) + **out = **in + } + if in.RetryInterval != nil { + in, out := &in.RetryInterval, &out.RetryInterval + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionParameters. +func (in *FunctionParameters) DeepCopy() *FunctionParameters { + if in == nil { + return nil + } + out := new(FunctionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IAMBinding) DeepCopyInto(out *IAMBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IAMBinding. +func (in *IAMBinding) DeepCopy() *IAMBinding { + if in == nil { + return nil + } + out := new(IAMBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *IAMBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IAMBindingInitParameters) DeepCopyInto(out *IAMBindingInitParameters) { + *out = *in + if in.FunctionID != nil { + in, out := &in.FunctionID, &out.FunctionID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IAMBindingInitParameters. +func (in *IAMBindingInitParameters) DeepCopy() *IAMBindingInitParameters { + if in == nil { + return nil + } + out := new(IAMBindingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IAMBindingList) DeepCopyInto(out *IAMBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IAMBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IAMBindingList. 
+func (in *IAMBindingList) DeepCopy() *IAMBindingList { + if in == nil { + return nil + } + out := new(IAMBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IAMBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IAMBindingObservation) DeepCopyInto(out *IAMBindingObservation) { + *out = *in + if in.FunctionID != nil { + in, out := &in.FunctionID, &out.FunctionID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IAMBindingObservation. +func (in *IAMBindingObservation) DeepCopy() *IAMBindingObservation { + if in == nil { + return nil + } + out := new(IAMBindingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IAMBindingParameters) DeepCopyInto(out *IAMBindingParameters) { + *out = *in + if in.FunctionID != nil { + in, out := &in.FunctionID, &out.FunctionID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IAMBindingParameters. +func (in *IAMBindingParameters) DeepCopy() *IAMBindingParameters { + if in == nil { + return nil + } + out := new(IAMBindingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IAMBindingSpec) DeepCopyInto(out *IAMBindingSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IAMBindingSpec. +func (in *IAMBindingSpec) DeepCopy() *IAMBindingSpec { + if in == nil { + return nil + } + out := new(IAMBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IAMBindingStatus) DeepCopyInto(out *IAMBindingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IAMBindingStatus. 
+func (in *IAMBindingStatus) DeepCopy() *IAMBindingStatus { + if in == nil { + return nil + } + out := new(IAMBindingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IotInitParameters) DeepCopyInto(out *IotInitParameters) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.DeviceID != nil { + in, out := &in.DeviceID, &out.DeviceID + *out = new(string) + **out = **in + } + if in.RegistryID != nil { + in, out := &in.RegistryID, &out.RegistryID + *out = new(string) + **out = **in + } + if in.Topic != nil { + in, out := &in.Topic, &out.Topic + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IotInitParameters. +func (in *IotInitParameters) DeepCopy() *IotInitParameters { + if in == nil { + return nil + } + out := new(IotInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IotObservation) DeepCopyInto(out *IotObservation) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.DeviceID != nil { + in, out := &in.DeviceID, &out.DeviceID + *out = new(string) + **out = **in + } + if in.RegistryID != nil { + in, out := &in.RegistryID, &out.RegistryID + *out = new(string) + **out = **in + } + if in.Topic != nil { + in, out := &in.Topic, &out.Topic + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IotObservation. +func (in *IotObservation) DeepCopy() *IotObservation { + if in == nil { + return nil + } + out := new(IotObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IotParameters) DeepCopyInto(out *IotParameters) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.DeviceID != nil { + in, out := &in.DeviceID, &out.DeviceID + *out = new(string) + **out = **in + } + if in.RegistryID != nil { + in, out := &in.RegistryID, &out.RegistryID + *out = new(string) + **out = **in + } + if in.Topic != nil { + in, out := &in.Topic, &out.Topic + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IotParameters. +func (in *IotParameters) DeepCopy() *IotParameters { + if in == nil { + return nil + } + out := new(IotParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *LogGroupInitParameters) DeepCopyInto(out *LogGroupInitParameters) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.LogGroupIds != nil { + in, out := &in.LogGroupIds, &out.LogGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogGroupInitParameters. +func (in *LogGroupInitParameters) DeepCopy() *LogGroupInitParameters { + if in == nil { + return nil + } + out := new(LogGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogGroupObservation) DeepCopyInto(out *LogGroupObservation) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.LogGroupIds != nil { + in, out := &in.LogGroupIds, &out.LogGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogGroupObservation. +func (in *LogGroupObservation) DeepCopy() *LogGroupObservation { + if in == nil { + return nil + } + out := new(LogGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogGroupParameters) DeepCopyInto(out *LogGroupParameters) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.LogGroupIds != nil { + in, out := &in.LogGroupIds, &out.LogGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogGroupParameters. +func (in *LogGroupParameters) DeepCopy() *LogGroupParameters { + if in == nil { + return nil + } + out := new(LogGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingInitParameters) DeepCopyInto(out *LoggingInitParameters) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.GroupID != nil { + in, out := &in.GroupID, &out.GroupID + *out = new(string) + **out = **in + } + if in.Levels != nil { + in, out := &in.Levels, &out.Levels + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceIds != nil { + in, out := &in.ResourceIds, &out.ResourceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceTypes != nil { + in, out := &in.ResourceTypes, &out.ResourceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { 
+ in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StreamNames != nil { + in, out := &in.StreamNames, &out.StreamNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingInitParameters. +func (in *LoggingInitParameters) DeepCopy() *LoggingInitParameters { + if in == nil { + return nil + } + out := new(LoggingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingObservation) DeepCopyInto(out *LoggingObservation) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.GroupID != nil { + in, out := &in.GroupID, &out.GroupID + *out = new(string) + **out = **in + } + if in.Levels != nil { + in, out := &in.Levels, &out.Levels + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceIds != nil { + in, out := &in.ResourceIds, &out.ResourceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceTypes != nil { + in, out := &in.ResourceTypes, &out.ResourceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StreamNames != nil { + in, out := &in.StreamNames, &out.StreamNames + *out = make([]*string, len(*in)) + for i := range *in { + 
if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingObservation. +func (in *LoggingObservation) DeepCopy() *LoggingObservation { + if in == nil { + return nil + } + out := new(LoggingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingParameters) DeepCopyInto(out *LoggingParameters) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.GroupID != nil { + in, out := &in.GroupID, &out.GroupID + *out = new(string) + **out = **in + } + if in.Levels != nil { + in, out := &in.Levels, &out.Levels + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceIds != nil { + in, out := &in.ResourceIds, &out.ResourceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceTypes != nil { + in, out := &in.ResourceTypes, &out.ResourceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StreamNames != nil { + in, out := &in.StreamNames, &out.StreamNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingParameters. 
+func (in *LoggingParameters) DeepCopy() *LoggingParameters { + if in == nil { + return nil + } + out := new(LoggingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MailInitParameters) DeepCopyInto(out *MailInitParameters) { + *out = *in + if in.AttachmentsBucketID != nil { + in, out := &in.AttachmentsBucketID, &out.AttachmentsBucketID + *out = new(string) + **out = **in + } + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MailInitParameters. +func (in *MailInitParameters) DeepCopy() *MailInitParameters { + if in == nil { + return nil + } + out := new(MailInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MailObservation) DeepCopyInto(out *MailObservation) { + *out = *in + if in.AttachmentsBucketID != nil { + in, out := &in.AttachmentsBucketID, &out.AttachmentsBucketID + *out = new(string) + **out = **in + } + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MailObservation. 
+func (in *MailObservation) DeepCopy() *MailObservation { + if in == nil { + return nil + } + out := new(MailObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MailParameters) DeepCopyInto(out *MailParameters) { + *out = *in + if in.AttachmentsBucketID != nil { + in, out := &in.AttachmentsBucketID, &out.AttachmentsBucketID + *out = new(string) + **out = **in + } + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MailParameters. +func (in *MailParameters) DeepCopy() *MailParameters { + if in == nil { + return nil + } + out := new(MailParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MessageQueueInitParameters) DeepCopyInto(out *MessageQueueInitParameters) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.QueueID != nil { + in, out := &in.QueueID, &out.QueueID + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.VisibilityTimeout != nil { + in, out := &in.VisibilityTimeout, &out.VisibilityTimeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MessageQueueInitParameters. +func (in *MessageQueueInitParameters) DeepCopy() *MessageQueueInitParameters { + if in == nil { + return nil + } + out := new(MessageQueueInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MessageQueueObservation) DeepCopyInto(out *MessageQueueObservation) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.QueueID != nil { + in, out := &in.QueueID, &out.QueueID + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.VisibilityTimeout != nil { + in, out := &in.VisibilityTimeout, &out.VisibilityTimeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MessageQueueObservation. 
+func (in *MessageQueueObservation) DeepCopy() *MessageQueueObservation { + if in == nil { + return nil + } + out := new(MessageQueueObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MessageQueueParameters) DeepCopyInto(out *MessageQueueParameters) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.QueueID != nil { + in, out := &in.QueueID, &out.QueueID + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.VisibilityTimeout != nil { + in, out := &in.VisibilityTimeout, &out.VisibilityTimeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MessageQueueParameters. +func (in *MessageQueueParameters) DeepCopy() *MessageQueueParameters { + if in == nil { + return nil + } + out := new(MessageQueueParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ObjectStorageInitParameters) DeepCopyInto(out *ObjectStorageInitParameters) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.BucketID != nil { + in, out := &in.BucketID, &out.BucketID + *out = new(string) + **out = **in + } + if in.Create != nil { + in, out := &in.Create, &out.Create + *out = new(bool) + **out = **in + } + if in.Delete != nil { + in, out := &in.Delete, &out.Delete + *out = new(bool) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Suffix != nil { + in, out := &in.Suffix, &out.Suffix + *out = new(string) + **out = **in + } + if in.Update != nil { + in, out := &in.Update, &out.Update + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageInitParameters. +func (in *ObjectStorageInitParameters) DeepCopy() *ObjectStorageInitParameters { + if in == nil { + return nil + } + out := new(ObjectStorageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ObjectStorageObservation) DeepCopyInto(out *ObjectStorageObservation) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.BucketID != nil { + in, out := &in.BucketID, &out.BucketID + *out = new(string) + **out = **in + } + if in.Create != nil { + in, out := &in.Create, &out.Create + *out = new(bool) + **out = **in + } + if in.Delete != nil { + in, out := &in.Delete, &out.Delete + *out = new(bool) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Suffix != nil { + in, out := &in.Suffix, &out.Suffix + *out = new(string) + **out = **in + } + if in.Update != nil { + in, out := &in.Update, &out.Update + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageObservation. +func (in *ObjectStorageObservation) DeepCopy() *ObjectStorageObservation { + if in == nil { + return nil + } + out := new(ObjectStorageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ObjectStorageParameters) DeepCopyInto(out *ObjectStorageParameters) { + *out = *in + if in.BatchCutoff != nil { + in, out := &in.BatchCutoff, &out.BatchCutoff + *out = new(string) + **out = **in + } + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(string) + **out = **in + } + if in.BucketID != nil { + in, out := &in.BucketID, &out.BucketID + *out = new(string) + **out = **in + } + if in.Create != nil { + in, out := &in.Create, &out.Create + *out = new(bool) + **out = **in + } + if in.Delete != nil { + in, out := &in.Delete, &out.Delete + *out = new(bool) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Suffix != nil { + in, out := &in.Suffix, &out.Suffix + *out = new(string) + **out = **in + } + if in.Update != nil { + in, out := &in.Update, &out.Update + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageParameters. +func (in *ObjectStorageParameters) DeepCopy() *ObjectStorageParameters { + if in == nil { + return nil + } + out := new(ObjectStorageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyInitParameters) DeepCopyInto(out *PolicyInitParameters) { + *out = *in + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } + if in.ZoneInstancesLimit != nil { + in, out := &in.ZoneInstancesLimit, &out.ZoneInstancesLimit + *out = new(float64) + **out = **in + } + if in.ZoneRequestsLimit != nil { + in, out := &in.ZoneRequestsLimit, &out.ZoneRequestsLimit + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyInitParameters. 
+func (in *PolicyInitParameters) DeepCopy() *PolicyInitParameters { + if in == nil { + return nil + } + out := new(PolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyObservation) DeepCopyInto(out *PolicyObservation) { + *out = *in + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } + if in.ZoneInstancesLimit != nil { + in, out := &in.ZoneInstancesLimit, &out.ZoneInstancesLimit + *out = new(float64) + **out = **in + } + if in.ZoneRequestsLimit != nil { + in, out := &in.ZoneRequestsLimit, &out.ZoneRequestsLimit + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyObservation. +func (in *PolicyObservation) DeepCopy() *PolicyObservation { + if in == nil { + return nil + } + out := new(PolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyParameters) DeepCopyInto(out *PolicyParameters) { + *out = *in + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } + if in.ZoneInstancesLimit != nil { + in, out := &in.ZoneInstancesLimit, &out.ZoneInstancesLimit + *out = new(float64) + **out = **in + } + if in.ZoneRequestsLimit != nil { + in, out := &in.ZoneRequestsLimit, &out.ZoneRequestsLimit + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyParameters. +func (in *PolicyParameters) DeepCopy() *PolicyParameters { + if in == nil { + return nil + } + out := new(PolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScalingPolicy) DeepCopyInto(out *ScalingPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingPolicy. +func (in *ScalingPolicy) DeepCopy() *ScalingPolicy { + if in == nil { + return nil + } + out := new(ScalingPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ScalingPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalingPolicyInitParameters) DeepCopyInto(out *ScalingPolicyInitParameters) { + *out = *in + if in.FunctionID != nil { + in, out := &in.FunctionID, &out.FunctionID + *out = new(string) + **out = **in + } + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = make([]PolicyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingPolicyInitParameters. +func (in *ScalingPolicyInitParameters) DeepCopy() *ScalingPolicyInitParameters { + if in == nil { + return nil + } + out := new(ScalingPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScalingPolicyList) DeepCopyInto(out *ScalingPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ScalingPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingPolicyList. +func (in *ScalingPolicyList) DeepCopy() *ScalingPolicyList { + if in == nil { + return nil + } + out := new(ScalingPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ScalingPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalingPolicyObservation) DeepCopyInto(out *ScalingPolicyObservation) { + *out = *in + if in.FunctionID != nil { + in, out := &in.FunctionID, &out.FunctionID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = make([]PolicyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingPolicyObservation. +func (in *ScalingPolicyObservation) DeepCopy() *ScalingPolicyObservation { + if in == nil { + return nil + } + out := new(ScalingPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScalingPolicyParameters) DeepCopyInto(out *ScalingPolicyParameters) { + *out = *in + if in.FunctionID != nil { + in, out := &in.FunctionID, &out.FunctionID + *out = new(string) + **out = **in + } + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = make([]PolicyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingPolicyParameters. +func (in *ScalingPolicyParameters) DeepCopy() *ScalingPolicyParameters { + if in == nil { + return nil + } + out := new(ScalingPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalingPolicySpec) DeepCopyInto(out *ScalingPolicySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingPolicySpec. +func (in *ScalingPolicySpec) DeepCopy() *ScalingPolicySpec { + if in == nil { + return nil + } + out := new(ScalingPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalingPolicyStatus) DeepCopyInto(out *ScalingPolicyStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingPolicyStatus. 
+func (in *ScalingPolicyStatus) DeepCopy() *ScalingPolicyStatus { + if in == nil { + return nil + } + out := new(ScalingPolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimerInitParameters) DeepCopyInto(out *TimerInitParameters) { + *out = *in + if in.CronExpression != nil { + in, out := &in.CronExpression, &out.CronExpression + *out = new(string) + **out = **in + } + if in.Payload != nil { + in, out := &in.Payload, &out.Payload + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimerInitParameters. +func (in *TimerInitParameters) DeepCopy() *TimerInitParameters { + if in == nil { + return nil + } + out := new(TimerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimerObservation) DeepCopyInto(out *TimerObservation) { + *out = *in + if in.CronExpression != nil { + in, out := &in.CronExpression, &out.CronExpression + *out = new(string) + **out = **in + } + if in.Payload != nil { + in, out := &in.Payload, &out.Payload + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimerObservation. +func (in *TimerObservation) DeepCopy() *TimerObservation { + if in == nil { + return nil + } + out := new(TimerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TimerParameters) DeepCopyInto(out *TimerParameters) { + *out = *in + if in.CronExpression != nil { + in, out := &in.CronExpression, &out.CronExpression + *out = new(string) + **out = **in + } + if in.Payload != nil { + in, out := &in.Payload, &out.Payload + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimerParameters. +func (in *TimerParameters) DeepCopy() *TimerParameters { + if in == nil { + return nil + } + out := new(TimerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Trigger) DeepCopyInto(out *Trigger) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Trigger. +func (in *Trigger) DeepCopy() *Trigger { + if in == nil { + return nil + } + out := new(Trigger) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Trigger) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TriggerInitParameters) DeepCopyInto(out *TriggerInitParameters) { + *out = *in + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = make([]ContainerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContainerRegistry != nil { + in, out := &in.ContainerRegistry, &out.ContainerRegistry + *out = make([]ContainerRegistryInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DataStreams != nil { + in, out := &in.DataStreams, &out.DataStreams + *out = make([]DataStreamsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Dlq != nil { + in, out := &in.Dlq, &out.Dlq + *out = make([]DlqInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Function != nil { + in, out := &in.Function, &out.Function + *out = make([]FunctionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Iot != nil { + in, out := &in.Iot, &out.Iot + *out = make([]IotInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = 
**in + } + (*out)[key] = outVal + } + } + if in.LogGroup != nil { + in, out := &in.LogGroup, &out.LogGroup + *out = make([]LogGroupInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = make([]LoggingInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Mail != nil { + in, out := &in.Mail, &out.Mail + *out = make([]MailInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MessageQueue != nil { + in, out := &in.MessageQueue, &out.MessageQueue + *out = make([]MessageQueueInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ObjectStorage != nil { + in, out := &in.ObjectStorage, &out.ObjectStorage + *out = make([]ObjectStorageInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Timer != nil { + in, out := &in.Timer, &out.Timer + *out = make([]TimerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerInitParameters. +func (in *TriggerInitParameters) DeepCopy() *TriggerInitParameters { + if in == nil { + return nil + } + out := new(TriggerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TriggerList) DeepCopyInto(out *TriggerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Trigger, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerList. +func (in *TriggerList) DeepCopy() *TriggerList { + if in == nil { + return nil + } + out := new(TriggerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TriggerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerObservation) DeepCopyInto(out *TriggerObservation) { + *out = *in + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = make([]ContainerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContainerRegistry != nil { + in, out := &in.ContainerRegistry, &out.ContainerRegistry + *out = make([]ContainerRegistryObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.DataStreams != nil { + in, out := &in.DataStreams, &out.DataStreams + *out = make([]DataStreamsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Dlq != nil { + in, out := &in.Dlq, &out.Dlq + *out = make([]DlqObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FolderID != 
nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.Function != nil { + in, out := &in.Function, &out.Function + *out = make([]FunctionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Iot != nil { + in, out := &in.Iot, &out.Iot + *out = make([]IotObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.LogGroup != nil { + in, out := &in.LogGroup, &out.LogGroup + *out = make([]LogGroupObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = make([]LoggingObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Mail != nil { + in, out := &in.Mail, &out.Mail + *out = make([]MailObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MessageQueue != nil { + in, out := &in.MessageQueue, &out.MessageQueue + *out = make([]MessageQueueObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ObjectStorage != nil { + in, out := &in.ObjectStorage, &out.ObjectStorage + *out = make([]ObjectStorageObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Timer != nil { + in, out := &in.Timer, &out.Timer + *out = make([]TimerObservation, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerObservation. +func (in *TriggerObservation) DeepCopy() *TriggerObservation { + if in == nil { + return nil + } + out := new(TriggerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerParameters) DeepCopyInto(out *TriggerParameters) { + *out = *in + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = make([]ContainerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContainerRegistry != nil { + in, out := &in.ContainerRegistry, &out.ContainerRegistry + *out = make([]ContainerRegistryParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DataStreams != nil { + in, out := &in.DataStreams, &out.DataStreams + *out = make([]DataStreamsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Dlq != nil { + in, out := &in.Dlq, &out.Dlq + *out = make([]DlqParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Function != nil { + in, out := &in.Function, &out.Function + *out = make([]FunctionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Iot != nil { + in, out := 
&in.Iot, &out.Iot + *out = make([]IotParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.LogGroup != nil { + in, out := &in.LogGroup, &out.LogGroup + *out = make([]LogGroupParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = make([]LoggingParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Mail != nil { + in, out := &in.Mail, &out.Mail + *out = make([]MailParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MessageQueue != nil { + in, out := &in.MessageQueue, &out.MessageQueue + *out = make([]MessageQueueParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ObjectStorage != nil { + in, out := &in.ObjectStorage, &out.ObjectStorage + *out = make([]ObjectStorageParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Timer != nil { + in, out := &in.Timer, &out.Timer + *out = make([]TimerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerParameters. 
+func (in *TriggerParameters) DeepCopy() *TriggerParameters { + if in == nil { + return nil + } + out := new(TriggerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerSpec) DeepCopyInto(out *TriggerSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerSpec. +func (in *TriggerSpec) DeepCopy() *TriggerSpec { + if in == nil { + return nil + } + out := new(TriggerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerStatus) DeepCopyInto(out *TriggerStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerStatus. +func (in *TriggerStatus) DeepCopy() *TriggerStatus { + if in == nil { + return nil + } + out := new(TriggerStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/function/v1alpha1/zz_generated.managed.go b/apis/function/v1alpha1/zz_generated.managed.go new file mode 100644 index 0000000..0c10567 --- /dev/null +++ b/apis/function/v1alpha1/zz_generated.managed.go @@ -0,0 +1,185 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this IAMBinding. +func (mg *IAMBinding) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this IAMBinding. 
+func (mg *IAMBinding) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this IAMBinding. +func (mg *IAMBinding) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this IAMBinding. +func (mg *IAMBinding) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this IAMBinding. +func (mg *IAMBinding) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this IAMBinding. +func (mg *IAMBinding) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this IAMBinding. +func (mg *IAMBinding) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this IAMBinding. +func (mg *IAMBinding) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this IAMBinding. +func (mg *IAMBinding) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this IAMBinding. +func (mg *IAMBinding) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this IAMBinding. +func (mg *IAMBinding) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this IAMBinding. +func (mg *IAMBinding) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ScalingPolicy. 
+func (mg *ScalingPolicy) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ScalingPolicy. +func (mg *ScalingPolicy) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ScalingPolicy. +func (mg *ScalingPolicy) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ScalingPolicy. +func (mg *ScalingPolicy) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ScalingPolicy. +func (mg *ScalingPolicy) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ScalingPolicy. +func (mg *ScalingPolicy) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ScalingPolicy. +func (mg *ScalingPolicy) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ScalingPolicy. +func (mg *ScalingPolicy) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ScalingPolicy. +func (mg *ScalingPolicy) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ScalingPolicy. +func (mg *ScalingPolicy) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ScalingPolicy. +func (mg *ScalingPolicy) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ScalingPolicy. 
+func (mg *ScalingPolicy) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Trigger. +func (mg *Trigger) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Trigger. +func (mg *Trigger) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Trigger. +func (mg *Trigger) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Trigger. +func (mg *Trigger) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Trigger. +func (mg *Trigger) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Trigger. +func (mg *Trigger) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Trigger. +func (mg *Trigger) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Trigger. +func (mg *Trigger) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Trigger. +func (mg *Trigger) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Trigger. +func (mg *Trigger) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Trigger. +func (mg *Trigger) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Trigger. 
+func (mg *Trigger) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/function/v1alpha1/zz_generated.managedlist.go b/apis/function/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 0000000..0ade8cb --- /dev/null +++ b/apis/function/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,32 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this IAMBindingList. +func (l *IAMBindingList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ScalingPolicyList. +func (l *ScalingPolicyList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this TriggerList. +func (l *TriggerList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/function/v1alpha1/zz_generated.resolvers.go b/apis/function/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000..3e66c4e --- /dev/null +++ b/apis/function/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,53 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this Trigger. 
+func (mg *Trigger) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/function/v1alpha1/zz_groupversion_info.go b/apis/function/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..20856ac --- /dev/null +++ b/apis/function/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,28 @@ +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=function.yandex-cloud.upjet.crossplane.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "function.yandex-cloud.upjet.crossplane.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/function/v1alpha1/zz_iambinding_terraformed.go b/apis/function/v1alpha1/zz_iambinding_terraformed.go new file mode 100755 index 0000000..9ef4da5 --- /dev/null +++ b/apis/function/v1alpha1/zz_iambinding_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this IAMBinding +func (mg *IAMBinding) GetTerraformResourceType() string { + return "yandex_function_iam_binding" +} + +// GetConnectionDetailsMapping for this IAMBinding +func (tr *IAMBinding) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this IAMBinding +func (tr *IAMBinding) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this IAMBinding +func (tr *IAMBinding) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this IAMBinding +func (tr *IAMBinding) GetID() string { + if tr.Status.AtProvider.ID == nil { + 
return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this IAMBinding +func (tr *IAMBinding) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this IAMBinding +func (tr *IAMBinding) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this IAMBinding +func (tr *IAMBinding) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this IAMBinding +func (tr *IAMBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this IAMBinding using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *IAMBinding) LateInitialize(attrs []byte) (bool, error) { + params := &IAMBindingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *IAMBinding) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/function/v1alpha1/zz_iambinding_types.go b/apis/function/v1alpha1/zz_iambinding_types.go new file mode 100755 index 0000000..343ac53 --- /dev/null +++ b/apis/function/v1alpha1/zz_iambinding_types.go @@ -0,0 +1,128 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type IAMBindingInitParameters struct { + + // The Yandex Cloud Function ID to apply a binding to. + FunctionID *string `json:"functionId,omitempty" tf:"function_id,omitempty"` + + // Identities that will be granted the privilege in role. + // Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be applied. 
See roles + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` +} + +type IAMBindingObservation struct { + + // The Yandex Cloud Function ID to apply a binding to. + FunctionID *string `json:"functionId,omitempty" tf:"function_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Identities that will be granted the privilege in role. + // Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be applied. See roles + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` +} + +type IAMBindingParameters struct { + + // The Yandex Cloud Function ID to apply a binding to. + // +kubebuilder:validation:Optional + FunctionID *string `json:"functionId,omitempty" tf:"function_id,omitempty"` + + // Identities that will be granted the privilege in role. + // Each entry can have one of the following values: + // +kubebuilder:validation:Optional + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be applied. See roles + // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // +kubebuilder:validation:Optional + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` +} + +// IAMBindingSpec defines the desired state of IAMBinding +type IAMBindingSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider IAMBindingParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider IAMBindingInitParameters `json:"initProvider,omitempty"` +} + +// IAMBindingStatus defines the observed state of IAMBinding. +type IAMBindingStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider IAMBindingObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// IAMBinding is the Schema for the IAMBindings API. Allows management of a single IAM binding for a +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type IAMBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.functionId) || (has(self.initProvider) && has(self.initProvider.functionId))",message="spec.forProvider.functionId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 
'Update' in self.managementPolicies) || has(self.forProvider.members) || (has(self.initProvider) && has(self.initProvider.members))",message="spec.forProvider.members is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" + Spec IAMBindingSpec `json:"spec"` + Status IAMBindingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// IAMBindingList contains a list of IAMBindings +type IAMBindingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []IAMBinding `json:"items"` +} + +// Repository type metadata. +var ( + IAMBinding_Kind = "IAMBinding" + IAMBinding_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: IAMBinding_Kind}.String() + IAMBinding_KindAPIVersion = IAMBinding_Kind + "." + CRDGroupVersion.String() + IAMBinding_GroupVersionKind = CRDGroupVersion.WithKind(IAMBinding_Kind) +) + +func init() { + SchemeBuilder.Register(&IAMBinding{}, &IAMBindingList{}) +} diff --git a/apis/function/v1alpha1/zz_scalingpolicy_terraformed.go b/apis/function/v1alpha1/zz_scalingpolicy_terraformed.go new file mode 100755 index 0000000..a6ea42e --- /dev/null +++ b/apis/function/v1alpha1/zz_scalingpolicy_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ScalingPolicy +func (mg *ScalingPolicy) GetTerraformResourceType() string { + return "yandex_function_scaling_policy" +} + +// GetConnectionDetailsMapping for this ScalingPolicy +func (tr *ScalingPolicy) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ScalingPolicy +func (tr *ScalingPolicy) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ScalingPolicy +func (tr *ScalingPolicy) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ScalingPolicy +func (tr *ScalingPolicy) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ScalingPolicy +func (tr *ScalingPolicy) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ScalingPolicy +func (tr *ScalingPolicy) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ScalingPolicy +func (tr *ScalingPolicy) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + 
return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ScalingPolicy +func (tr *ScalingPolicy) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ScalingPolicy using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ScalingPolicy) LateInitialize(attrs []byte) (bool, error) { + params := &ScalingPolicyParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ScalingPolicy) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/function/v1alpha1/zz_scalingpolicy_types.go b/apis/function/v1alpha1/zz_scalingpolicy_types.go new file mode 100755 index 0000000..bb4ccd2 --- /dev/null +++ b/apis/function/v1alpha1/zz_scalingpolicy_types.go @@ -0,0 +1,142 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type PolicyInitParameters struct { + + // Yandex.Cloud Function version tag for Yandex Cloud Function scaling policy + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` + + // max number of instances in one zone for Yandex.Cloud Function with tag + ZoneInstancesLimit *float64 `json:"zoneInstancesLimit,omitempty" tf:"zone_instances_limit,omitempty"` + + // max number of requests in one zone for Yandex.Cloud Function with tag + ZoneRequestsLimit *float64 `json:"zoneRequestsLimit,omitempty" tf:"zone_requests_limit,omitempty"` +} + +type PolicyObservation struct { + + // Yandex.Cloud Function version tag for Yandex Cloud Function scaling policy + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` + + // max number of instances in one zone for Yandex.Cloud Function with tag + ZoneInstancesLimit *float64 `json:"zoneInstancesLimit,omitempty" tf:"zone_instances_limit,omitempty"` + + // max number of requests in one zone for Yandex.Cloud Function with tag + ZoneRequestsLimit *float64 `json:"zoneRequestsLimit,omitempty" tf:"zone_requests_limit,omitempty"` +} + +type PolicyParameters struct { + + // Yandex.Cloud Function version tag for Yandex Cloud Function scaling policy + // +kubebuilder:validation:Optional + Tag *string `json:"tag" tf:"tag,omitempty"` + + // max number of instances in 
one zone for Yandex.Cloud Function with tag + // +kubebuilder:validation:Optional + ZoneInstancesLimit *float64 `json:"zoneInstancesLimit,omitempty" tf:"zone_instances_limit,omitempty"` + + // max number of requests in one zone for Yandex.Cloud Function with tag + // +kubebuilder:validation:Optional + ZoneRequestsLimit *float64 `json:"zoneRequestsLimit,omitempty" tf:"zone_requests_limit,omitempty"` +} + +type ScalingPolicyInitParameters struct { + + // Yandex Cloud Function id used to define function + FunctionID *string `json:"functionId,omitempty" tf:"function_id,omitempty"` + + // list definition for Yandex Cloud Function scaling policies + Policy []PolicyInitParameters `json:"policy,omitempty" tf:"policy,omitempty"` +} + +type ScalingPolicyObservation struct { + + // Yandex Cloud Function id used to define function + FunctionID *string `json:"functionId,omitempty" tf:"function_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // list definition for Yandex Cloud Function scaling policies + Policy []PolicyObservation `json:"policy,omitempty" tf:"policy,omitempty"` +} + +type ScalingPolicyParameters struct { + + // Yandex Cloud Function id used to define function + // +kubebuilder:validation:Optional + FunctionID *string `json:"functionId,omitempty" tf:"function_id,omitempty"` + + // list definition for Yandex Cloud Function scaling policies + // +kubebuilder:validation:Optional + Policy []PolicyParameters `json:"policy,omitempty" tf:"policy,omitempty"` +} + +// ScalingPolicySpec defines the desired state of ScalingPolicy +type ScalingPolicySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ScalingPolicyParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ScalingPolicyInitParameters `json:"initProvider,omitempty"` +} + +// ScalingPolicyStatus defines the observed state of ScalingPolicy. +type ScalingPolicyStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ScalingPolicyObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// ScalingPolicy is the Schema for the ScalingPolicys API. Allows management of a Yandex Cloud Function Scaling Policy. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type ScalingPolicy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.functionId) || (has(self.initProvider) && has(self.initProvider.functionId))",message="spec.forProvider.functionId is a required parameter" + Spec ScalingPolicySpec `json:"spec"` + Status ScalingPolicyStatus 
`json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ScalingPolicyList contains a list of ScalingPolicys +type ScalingPolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ScalingPolicy `json:"items"` +} + +// Repository type metadata. +var ( + ScalingPolicy_Kind = "ScalingPolicy" + ScalingPolicy_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ScalingPolicy_Kind}.String() + ScalingPolicy_KindAPIVersion = ScalingPolicy_Kind + "." + CRDGroupVersion.String() + ScalingPolicy_GroupVersionKind = CRDGroupVersion.WithKind(ScalingPolicy_Kind) +) + +func init() { + SchemeBuilder.Register(&ScalingPolicy{}, &ScalingPolicyList{}) +} diff --git a/apis/function/v1alpha1/zz_trigger_terraformed.go b/apis/function/v1alpha1/zz_trigger_terraformed.go new file mode 100755 index 0000000..bcb71aa --- /dev/null +++ b/apis/function/v1alpha1/zz_trigger_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Trigger +func (mg *Trigger) GetTerraformResourceType() string { + return "yandex_function_trigger" +} + +// GetConnectionDetailsMapping for this Trigger +func (tr *Trigger) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Trigger +func (tr *Trigger) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Trigger +func (tr *Trigger) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// 
GetID returns ID of underlying Terraform resource of this Trigger +func (tr *Trigger) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Trigger +func (tr *Trigger) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Trigger +func (tr *Trigger) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Trigger +func (tr *Trigger) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this Trigger, merging spec.initProvider into spec.forProvider +func (tr *Trigger) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields.
 + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Trigger using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Trigger) LateInitialize(attrs []byte) (bool, error) { + params := &TriggerParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Trigger) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/function/v1alpha1/zz_trigger_types.go b/apis/function/v1alpha1/zz_trigger_types.go new file mode 100755 index 0000000..a2193a8 --- /dev/null +++ b/apis/function/v1alpha1/zz_trigger_types.go @@ -0,0 +1,980 @@ +// Code generated by upjet. DO NOT EDIT.
+ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ContainerInitParameters struct { + + // Yandex.Cloud Function ID for Yandex Cloud Functions Trigger + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Path for Yandex.Cloud Serverless Container for Yandex Cloud Functions Trigger + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Retry attempts for Yandex.Cloud Function for Yandex Cloud Functions Trigger + RetryAttempts *string `json:"retryAttempts,omitempty" tf:"retry_attempts,omitempty"` + + // Retry interval in seconds for Yandex.Cloud Function for Yandex Cloud Functions Trigger + RetryInterval *string `json:"retryInterval,omitempty" tf:"retry_interval,omitempty"` + + // Message Queue Service Account ID for Yandex Cloud Functions Trigger + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` +} + +type ContainerObservation struct { + + // Yandex.Cloud Function ID for Yandex Cloud Functions Trigger + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Path for Yandex.Cloud Serverless Container for Yandex Cloud Functions Trigger + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Retry attempts for Yandex.Cloud Function for Yandex Cloud Functions Trigger + RetryAttempts *string `json:"retryAttempts,omitempty" tf:"retry_attempts,omitempty"` + + // Retry interval in seconds for Yandex.Cloud Function for Yandex Cloud Functions Trigger + RetryInterval *string `json:"retryInterval,omitempty" tf:"retry_interval,omitempty"` + + // Message Queue Service Account ID for Yandex Cloud Functions Trigger + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` +} + +type ContainerParameters struct { + + // Yandex.Cloud Function ID for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + ID 
*string `json:"id" tf:"id,omitempty"` + + // Path for Yandex.Cloud Serverless Container for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Retry attempts for Yandex.Cloud Function for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + RetryAttempts *string `json:"retryAttempts,omitempty" tf:"retry_attempts,omitempty"` + + // Retry interval in seconds for Yandex.Cloud Function for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + RetryInterval *string `json:"retryInterval,omitempty" tf:"retry_interval,omitempty"` + + // Message Queue Service Account ID for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` +} + +type ContainerRegistryInitParameters struct { + + // Batch Duration in seconds for Yandex Cloud Functions Trigger + BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"` + + // Batch Size for Yandex Cloud Functions Trigger + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // Boolean flag for setting create image event for Yandex Cloud Functions Trigger + CreateImage *bool `json:"createImage,omitempty" tf:"create_image,omitempty"` + + // Boolean flag for setting create image tag event for Yandex Cloud Functions Trigger + CreateImageTag *bool `json:"createImageTag,omitempty" tf:"create_image_tag,omitempty"` + + // Boolean flag for setting delete image event for Yandex Cloud Functions Trigger + DeleteImage *bool `json:"deleteImage,omitempty" tf:"delete_image,omitempty"` + + // Boolean flag for setting delete image tag event for Yandex Cloud Functions Trigger + DeleteImageTag *bool `json:"deleteImageTag,omitempty" tf:"delete_image_tag,omitempty"` + + // Image name filter setting for Yandex Cloud Functions Trigger + ImageName *string `json:"imageName,omitempty" 
tf:"image_name,omitempty"` + + // IoT Registry ID for Yandex Cloud Functions Trigger + RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"` + + // Tag for Yandex.Cloud Function for Yandex Cloud Functions Trigger + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type ContainerRegistryObservation struct { + + // Batch Duration in seconds for Yandex Cloud Functions Trigger + BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"` + + // Batch Size for Yandex Cloud Functions Trigger + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // Boolean flag for setting create image event for Yandex Cloud Functions Trigger + CreateImage *bool `json:"createImage,omitempty" tf:"create_image,omitempty"` + + // Boolean flag for setting create image tag event for Yandex Cloud Functions Trigger + CreateImageTag *bool `json:"createImageTag,omitempty" tf:"create_image_tag,omitempty"` + + // Boolean flag for setting delete image event for Yandex Cloud Functions Trigger + DeleteImage *bool `json:"deleteImage,omitempty" tf:"delete_image,omitempty"` + + // Boolean flag for setting delete image tag event for Yandex Cloud Functions Trigger + DeleteImageTag *bool `json:"deleteImageTag,omitempty" tf:"delete_image_tag,omitempty"` + + // Image name filter setting for Yandex Cloud Functions Trigger + ImageName *string `json:"imageName,omitempty" tf:"image_name,omitempty"` + + // IoT Registry ID for Yandex Cloud Functions Trigger + RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"` + + // Tag for Yandex.Cloud Function for Yandex Cloud Functions Trigger + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type ContainerRegistryParameters struct { + + // Batch Duration in seconds for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + BatchCutoff *string `json:"batchCutoff" tf:"batch_cutoff,omitempty"` + + // Batch Size for Yandex Cloud Functions Trigger + // 
+kubebuilder:validation:Optional + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // Boolean flag for setting create image event for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + CreateImage *bool `json:"createImage,omitempty" tf:"create_image,omitempty"` + + // Boolean flag for setting create image tag event for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + CreateImageTag *bool `json:"createImageTag,omitempty" tf:"create_image_tag,omitempty"` + + // Boolean flag for setting delete image event for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + DeleteImage *bool `json:"deleteImage,omitempty" tf:"delete_image,omitempty"` + + // Boolean flag for setting delete image tag event for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + DeleteImageTag *bool `json:"deleteImageTag,omitempty" tf:"delete_image_tag,omitempty"` + + // Image name filter setting for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + ImageName *string `json:"imageName,omitempty" tf:"image_name,omitempty"` + + // IoT Registry ID for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + RegistryID *string `json:"registryId" tf:"registry_id,omitempty"` + + // Tag for Yandex.Cloud Function for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type DataStreamsInitParameters struct { + + // Batch Duration in seconds for Yandex Cloud Functions Trigger + BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"` + + // Batch Size for Yandex Cloud Functions Trigger + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // Stream database for Yandex Cloud Functions Trigger + Database *string `json:"database,omitempty" tf:"database,omitempty"` + + // Message Queue Service Account ID for Yandex Cloud Functions Trigger + 
ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + + // Stream name for Yandex Cloud Functions Trigger + StreamName *string `json:"streamName,omitempty" tf:"stream_name,omitempty"` +} + +type DataStreamsObservation struct { + + // Batch Duration in seconds for Yandex Cloud Functions Trigger + BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"` + + // Batch Size for Yandex Cloud Functions Trigger + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // Stream database for Yandex Cloud Functions Trigger + Database *string `json:"database,omitempty" tf:"database,omitempty"` + + // Message Queue Service Account ID for Yandex Cloud Functions Trigger + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + + // Stream name for Yandex Cloud Functions Trigger + StreamName *string `json:"streamName,omitempty" tf:"stream_name,omitempty"` +} + +type DataStreamsParameters struct { + + // Batch Duration in seconds for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + BatchCutoff *string `json:"batchCutoff" tf:"batch_cutoff,omitempty"` + + // Batch Size for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // Stream database for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + Database *string `json:"database" tf:"database,omitempty"` + + // Message Queue Service Account ID for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + ServiceAccountID *string `json:"serviceAccountId" tf:"service_account_id,omitempty"` + + // Stream name for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + StreamName *string `json:"streamName" tf:"stream_name,omitempty"` +} + +type DlqInitParameters struct { + + // Message Queue ID for Yandex Cloud Functions Trigger + QueueID *string 
`json:"queueId,omitempty" tf:"queue_id,omitempty"` + + // Message Queue Service Account ID for Yandex Cloud Functions Trigger + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` +} + +type DlqObservation struct { + + // Message Queue ID for Yandex Cloud Functions Trigger + QueueID *string `json:"queueId,omitempty" tf:"queue_id,omitempty"` + + // Message Queue Service Account ID for Yandex Cloud Functions Trigger + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` +} + +type DlqParameters struct { + + // Message Queue ID for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + QueueID *string `json:"queueId" tf:"queue_id,omitempty"` + + // Message Queue Service Account ID for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + ServiceAccountID *string `json:"serviceAccountId" tf:"service_account_id,omitempty"` +} + +type FunctionInitParameters struct { + + // Yandex.Cloud Function ID for Yandex Cloud Functions Trigger + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Retry attempts for Yandex.Cloud Function for Yandex Cloud Functions Trigger + RetryAttempts *string `json:"retryAttempts,omitempty" tf:"retry_attempts,omitempty"` + + // Retry interval in seconds for Yandex.Cloud Function for Yandex Cloud Functions Trigger + RetryInterval *string `json:"retryInterval,omitempty" tf:"retry_interval,omitempty"` + + // Message Queue Service Account ID for Yandex Cloud Functions Trigger + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + + // Tag for Yandex.Cloud Function for Yandex Cloud Functions Trigger + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type FunctionObservation struct { + + // Yandex.Cloud Function ID for Yandex Cloud Functions Trigger + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Retry attempts for Yandex.Cloud Function for Yandex Cloud Functions 
Trigger + RetryAttempts *string `json:"retryAttempts,omitempty" tf:"retry_attempts,omitempty"` + + // Retry interval in seconds for Yandex.Cloud Function for Yandex Cloud Functions Trigger + RetryInterval *string `json:"retryInterval,omitempty" tf:"retry_interval,omitempty"` + + // Message Queue Service Account ID for Yandex Cloud Functions Trigger + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + + // Tag for Yandex.Cloud Function for Yandex Cloud Functions Trigger + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type FunctionParameters struct { + + // Yandex.Cloud Function ID for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + ID *string `json:"id" tf:"id,omitempty"` + + // Retry attempts for Yandex.Cloud Function for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + RetryAttempts *string `json:"retryAttempts,omitempty" tf:"retry_attempts,omitempty"` + + // Retry interval in seconds for Yandex.Cloud Function for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + RetryInterval *string `json:"retryInterval,omitempty" tf:"retry_interval,omitempty"` + + // Message Queue Service Account ID for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + + // Tag for Yandex.Cloud Function for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type IotInitParameters struct { + + // Batch Duration in seconds for Yandex Cloud Functions Trigger + BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"` + + // Batch Size for Yandex Cloud Functions Trigger + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // IoT Device ID for Yandex Cloud Functions Trigger + DeviceID *string `json:"deviceId,omitempty" 
tf:"device_id,omitempty"` + + // IoT Registry ID for Yandex Cloud Functions Trigger + RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"` + + // IoT Topic for Yandex Cloud Functions Trigger + Topic *string `json:"topic,omitempty" tf:"topic,omitempty"` +} + +type IotObservation struct { + + // Batch Duration in seconds for Yandex Cloud Functions Trigger + BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"` + + // Batch Size for Yandex Cloud Functions Trigger + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // IoT Device ID for Yandex Cloud Functions Trigger + DeviceID *string `json:"deviceId,omitempty" tf:"device_id,omitempty"` + + // IoT Registry ID for Yandex Cloud Functions Trigger + RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"` + + // IoT Topic for Yandex Cloud Functions Trigger + Topic *string `json:"topic,omitempty" tf:"topic,omitempty"` +} + +type IotParameters struct { + + // Batch Duration in seconds for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + BatchCutoff *string `json:"batchCutoff" tf:"batch_cutoff,omitempty"` + + // Batch Size for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // IoT Device ID for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + DeviceID *string `json:"deviceId,omitempty" tf:"device_id,omitempty"` + + // IoT Registry ID for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + RegistryID *string `json:"registryId" tf:"registry_id,omitempty"` + + // IoT Topic for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + Topic *string `json:"topic,omitempty" tf:"topic,omitempty"` +} + +type LogGroupInitParameters struct { + + // Batch Duration in seconds for Yandex Cloud Functions Trigger + BatchCutoff *string `json:"batchCutoff,omitempty" 
tf:"batch_cutoff,omitempty"` + + // Batch Size for Yandex Cloud Functions Trigger + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // +listType=set + LogGroupIds []*string `json:"logGroupIds,omitempty" tf:"log_group_ids,omitempty"` +} + +type LogGroupObservation struct { + + // Batch Duration in seconds for Yandex Cloud Functions Trigger + BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"` + + // Batch Size for Yandex Cloud Functions Trigger + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // +listType=set + LogGroupIds []*string `json:"logGroupIds,omitempty" tf:"log_group_ids,omitempty"` +} + +type LogGroupParameters struct { + + // Batch Duration in seconds for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + BatchCutoff *string `json:"batchCutoff" tf:"batch_cutoff,omitempty"` + + // Batch Size for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // +kubebuilder:validation:Optional + // +listType=set + LogGroupIds []*string `json:"logGroupIds" tf:"log_group_ids,omitempty"` +} + +type LoggingInitParameters struct { + + // Batch Duration in seconds for Yandex Cloud Functions Trigger + BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"` + + // Batch Size for Yandex Cloud Functions Trigger + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // Logging group ID for Yandex Cloud Functions Trigger + GroupID *string `json:"groupId,omitempty" tf:"group_id,omitempty"` + + // Logging level filter setting for Yandex Cloud Functions Trigger + // +listType=set + Levels []*string `json:"levels,omitempty" tf:"levels,omitempty"` + + // Resource ID filter setting for Yandex Cloud Functions Trigger + // +listType=set + ResourceIds []*string `json:"resourceIds,omitempty" tf:"resource_ids,omitempty"` + + // Resource type 
filter setting for Yandex Cloud Functions Trigger + // +listType=set + ResourceTypes []*string `json:"resourceTypes,omitempty" tf:"resource_types,omitempty"` + + // Logging stream name filter setting for Yandex Cloud Functions Trigger + // +listType=set + StreamNames []*string `json:"streamNames,omitempty" tf:"stream_names,omitempty"` +} + +type LoggingObservation struct { + + // Batch Duration in seconds for Yandex Cloud Functions Trigger + BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"` + + // Batch Size for Yandex Cloud Functions Trigger + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // Logging group ID for Yandex Cloud Functions Trigger + GroupID *string `json:"groupId,omitempty" tf:"group_id,omitempty"` + + // Logging level filter setting for Yandex Cloud Functions Trigger + // +listType=set + Levels []*string `json:"levels,omitempty" tf:"levels,omitempty"` + + // Resource ID filter setting for Yandex Cloud Functions Trigger + // +listType=set + ResourceIds []*string `json:"resourceIds,omitempty" tf:"resource_ids,omitempty"` + + // Resource type filter setting for Yandex Cloud Functions Trigger + // +listType=set + ResourceTypes []*string `json:"resourceTypes,omitempty" tf:"resource_types,omitempty"` + + // Logging stream name filter setting for Yandex Cloud Functions Trigger + // +listType=set + StreamNames []*string `json:"streamNames,omitempty" tf:"stream_names,omitempty"` +} + +type LoggingParameters struct { + + // Batch Duration in seconds for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + BatchCutoff *string `json:"batchCutoff" tf:"batch_cutoff,omitempty"` + + // Batch Size for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // Logging group ID for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + GroupID *string `json:"groupId" 
tf:"group_id,omitempty"` + + // Logging level filter setting for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + // +listType=set + Levels []*string `json:"levels,omitempty" tf:"levels,omitempty"` + + // Resource ID filter setting for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + // +listType=set + ResourceIds []*string `json:"resourceIds,omitempty" tf:"resource_ids,omitempty"` + + // Resource type filter setting for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + // +listType=set + ResourceTypes []*string `json:"resourceTypes,omitempty" tf:"resource_types,omitempty"` + + // Logging stream name filter setting for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + // +listType=set + StreamNames []*string `json:"streamNames,omitempty" tf:"stream_names,omitempty"` +} + +type MailInitParameters struct { + + // Object Storage Bucket ID for Yandex Cloud Functions Trigger + AttachmentsBucketID *string `json:"attachmentsBucketId,omitempty" tf:"attachments_bucket_id,omitempty"` + + // Batch Duration in seconds for Yandex Cloud Functions Trigger + BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"` + + // Batch Size for Yandex Cloud Functions Trigger + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // Message Queue Service Account ID for Yandex Cloud Functions Trigger + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` +} + +type MailObservation struct { + + // Object Storage Bucket ID for Yandex Cloud Functions Trigger + AttachmentsBucketID *string `json:"attachmentsBucketId,omitempty" tf:"attachments_bucket_id,omitempty"` + + // Batch Duration in seconds for Yandex Cloud Functions Trigger + BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"` + + // Batch Size for Yandex Cloud Functions Trigger + BatchSize *string `json:"batchSize,omitempty" 
tf:"batch_size,omitempty"` + + // Message Queue Service Account ID for Yandex Cloud Functions Trigger + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` +} + +type MailParameters struct { + + // Object Storage Bucket ID for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + AttachmentsBucketID *string `json:"attachmentsBucketId,omitempty" tf:"attachments_bucket_id,omitempty"` + + // Batch Duration in seconds for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + BatchCutoff *string `json:"batchCutoff" tf:"batch_cutoff,omitempty"` + + // Batch Size for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // Message Queue Service Account ID for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` +} + +type MessageQueueInitParameters struct { + + // Batch Duration in seconds for Yandex Cloud Functions Trigger + BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"` + + // Batch Size for Yandex Cloud Functions Trigger + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // Message Queue ID for Yandex Cloud Functions Trigger + QueueID *string `json:"queueId,omitempty" tf:"queue_id,omitempty"` + + // Message Queue Service Account ID for Yandex Cloud Functions Trigger + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + + // Visibility timeout for Yandex Cloud Functions Trigger + VisibilityTimeout *string `json:"visibilityTimeout,omitempty" tf:"visibility_timeout,omitempty"` +} + +type MessageQueueObservation struct { + + // Batch Duration in seconds for Yandex Cloud Functions Trigger + BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"` + + // Batch Size for 
Yandex Cloud Functions Trigger + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // Message Queue ID for Yandex Cloud Functions Trigger + QueueID *string `json:"queueId,omitempty" tf:"queue_id,omitempty"` + + // Message Queue Service Account ID for Yandex Cloud Functions Trigger + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + + // Visibility timeout for Yandex Cloud Functions Trigger + VisibilityTimeout *string `json:"visibilityTimeout,omitempty" tf:"visibility_timeout,omitempty"` +} + +type MessageQueueParameters struct { + + // Batch Duration in seconds for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + BatchCutoff *string `json:"batchCutoff" tf:"batch_cutoff,omitempty"` + + // Batch Size for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // Message Queue ID for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + QueueID *string `json:"queueId" tf:"queue_id,omitempty"` + + // Message Queue Service Account ID for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + ServiceAccountID *string `json:"serviceAccountId" tf:"service_account_id,omitempty"` + + // Visibility timeout for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + VisibilityTimeout *string `json:"visibilityTimeout,omitempty" tf:"visibility_timeout,omitempty"` +} + +type ObjectStorageInitParameters struct { + + // Batch Duration in seconds for Yandex Cloud Functions Trigger + BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"` + + // Batch Size for Yandex Cloud Functions Trigger + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // Object Storage Bucket ID for Yandex Cloud Functions Trigger + BucketID *string `json:"bucketId,omitempty" tf:"bucket_id,omitempty"` + + // Boolean flag for 
setting create event for Yandex Cloud Functions Trigger + Create *bool `json:"create,omitempty" tf:"create,omitempty"` + + // Boolean flag for setting delete event for Yandex Cloud Functions Trigger + Delete *bool `json:"delete,omitempty" tf:"delete,omitempty"` + + // Prefix for Object Storage for Yandex Cloud Functions Trigger + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Suffix for Object Storage for Yandex Cloud Functions Trigger + Suffix *string `json:"suffix,omitempty" tf:"suffix,omitempty"` + + // Boolean flag for setting update event for Yandex Cloud Functions Trigger + Update *bool `json:"update,omitempty" tf:"update,omitempty"` +} + +type ObjectStorageObservation struct { + + // Batch Duration in seconds for Yandex Cloud Functions Trigger + BatchCutoff *string `json:"batchCutoff,omitempty" tf:"batch_cutoff,omitempty"` + + // Batch Size for Yandex Cloud Functions Trigger + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // Object Storage Bucket ID for Yandex Cloud Functions Trigger + BucketID *string `json:"bucketId,omitempty" tf:"bucket_id,omitempty"` + + // Boolean flag for setting create event for Yandex Cloud Functions Trigger + Create *bool `json:"create,omitempty" tf:"create,omitempty"` + + // Boolean flag for setting delete event for Yandex Cloud Functions Trigger + Delete *bool `json:"delete,omitempty" tf:"delete,omitempty"` + + // Prefix for Object Storage for Yandex Cloud Functions Trigger + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Suffix for Object Storage for Yandex Cloud Functions Trigger + Suffix *string `json:"suffix,omitempty" tf:"suffix,omitempty"` + + // Boolean flag for setting update event for Yandex Cloud Functions Trigger + Update *bool `json:"update,omitempty" tf:"update,omitempty"` +} + +type ObjectStorageParameters struct { + + // Batch Duration in seconds for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + BatchCutoff *string 
`json:"batchCutoff" tf:"batch_cutoff,omitempty"` + + // Batch Size for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + BatchSize *string `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // Object Storage Bucket ID for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + BucketID *string `json:"bucketId" tf:"bucket_id,omitempty"` + + // Boolean flag for setting create event for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + Create *bool `json:"create,omitempty" tf:"create,omitempty"` + + // Boolean flag for setting delete event for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + Delete *bool `json:"delete,omitempty" tf:"delete,omitempty"` + + // Prefix for Object Storage for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Suffix for Object Storage for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + Suffix *string `json:"suffix,omitempty" tf:"suffix,omitempty"` + + // Boolean flag for setting update event for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + Update *bool `json:"update,omitempty" tf:"update,omitempty"` +} + +type TimerInitParameters struct { + + // Cron expression for timer for Yandex Cloud Functions Trigger + CronExpression *string `json:"cronExpression,omitempty" tf:"cron_expression,omitempty"` + + // Payload to be passed to function + Payload *string `json:"payload,omitempty" tf:"payload,omitempty"` +} + +type TimerObservation struct { + + // Cron expression for timer for Yandex Cloud Functions Trigger + CronExpression *string `json:"cronExpression,omitempty" tf:"cron_expression,omitempty"` + + // Payload to be passed to function + Payload *string `json:"payload,omitempty" tf:"payload,omitempty"` +} + +type TimerParameters struct { + + // Cron expression for timer for Yandex Cloud Functions Trigger + // 
+kubebuilder:validation:Optional + CronExpression *string `json:"cronExpression" tf:"cron_expression,omitempty"` + + // Payload to be passed to function + // +kubebuilder:validation:Optional + Payload *string `json:"payload,omitempty" tf:"payload,omitempty"` +} + +type TriggerInitParameters struct { + Container []ContainerInitParameters `json:"container,omitempty" tf:"container,omitempty"` + + // Container Registry settings definition for Yandex Cloud Functions Trigger, if present + ContainerRegistry []ContainerRegistryInitParameters `json:"containerRegistry,omitempty" tf:"container_registry,omitempty"` + + // Data Streams settings definition for Yandex Cloud Functions Trigger, if present + DataStreams []DataStreamsInitParameters `json:"dataStreams,omitempty" tf:"data_streams,omitempty"` + + // Description of the Yandex Cloud Functions Trigger + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Dead Letter Queue settings definition for Yandex Cloud Functions Trigger + Dlq []DlqInitParameters `json:"dlq,omitempty" tf:"dlq,omitempty"` + + // Folder ID for the Yandex Cloud Functions Trigger + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // Yandex.Cloud Function settings definition for Yandex Cloud Functions Trigger + Function []FunctionInitParameters `json:"function,omitempty" tf:"function,omitempty"` + + // IoT settings definition for Yandex Cloud Functions Trigger, if present. 
Only one section iot or message_queue or object_storage or timer can be defined. + Iot []IotInitParameters `json:"iot,omitempty" tf:"iot,omitempty"` + + // A set of key/value label pairs to assign to the Yandex Cloud Functions Trigger + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + LogGroup []LogGroupInitParameters `json:"logGroup,omitempty" tf:"log_group,omitempty"` + + // Logging settings definition for Yandex Cloud Functions Trigger, if present + Logging []LoggingInitParameters `json:"logging,omitempty" tf:"logging,omitempty"` + + // Logging settings definition for Yandex Cloud Functions Trigger, if present + Mail []MailInitParameters `json:"mail,omitempty" tf:"mail,omitempty"` + + // Message Queue settings definition for Yandex Cloud Functions Trigger, if present + MessageQueue []MessageQueueInitParameters `json:"messageQueue,omitempty" tf:"message_queue,omitempty"` + + // Yandex Cloud Functions Trigger name used to define trigger + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Object Storage settings definition for Yandex Cloud Functions Trigger, if present + ObjectStorage []ObjectStorageInitParameters `json:"objectStorage,omitempty" tf:"object_storage,omitempty"` + + // Timer settings definition for Yandex Cloud Functions Trigger, if present + Timer []TimerInitParameters `json:"timer,omitempty" tf:"timer,omitempty"` +} + +type TriggerObservation struct { + Container []ContainerObservation `json:"container,omitempty" tf:"container,omitempty"` + + // Container Registry settings definition for Yandex Cloud Functions Trigger, if present + ContainerRegistry []ContainerRegistryObservation `json:"containerRegistry,omitempty" tf:"container_registry,omitempty"` + + // Creation timestamp of the Yandex Cloud Functions Trigger + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // Data Streams settings definition for Yandex Cloud Functions Trigger, if present + DataStreams 
[]DataStreamsObservation `json:"dataStreams,omitempty" tf:"data_streams,omitempty"` + + // Description of the Yandex Cloud Functions Trigger + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Dead Letter Queue settings definition for Yandex Cloud Functions Trigger + Dlq []DlqObservation `json:"dlq,omitempty" tf:"dlq,omitempty"` + + // Folder ID for the Yandex Cloud Functions Trigger + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Yandex.Cloud Function settings definition for Yandex Cloud Functions Trigger + Function []FunctionObservation `json:"function,omitempty" tf:"function,omitempty"` + + // Yandex.Cloud Function ID for Yandex Cloud Functions Trigger + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // IoT settings definition for Yandex Cloud Functions Trigger, if present. Only one section iot or message_queue or object_storage or timer can be defined. + Iot []IotObservation `json:"iot,omitempty" tf:"iot,omitempty"` + + // A set of key/value label pairs to assign to the Yandex Cloud Functions Trigger + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + LogGroup []LogGroupObservation `json:"logGroup,omitempty" tf:"log_group,omitempty"` + + // Logging settings definition for Yandex Cloud Functions Trigger, if present + Logging []LoggingObservation `json:"logging,omitempty" tf:"logging,omitempty"` + + // Logging settings definition for Yandex Cloud Functions Trigger, if present + Mail []MailObservation `json:"mail,omitempty" tf:"mail,omitempty"` + + // Message Queue settings definition for Yandex Cloud Functions Trigger, if present + MessageQueue []MessageQueueObservation `json:"messageQueue,omitempty" tf:"message_queue,omitempty"` + + // Yandex Cloud Functions Trigger name used to define trigger + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Object Storage settings definition for Yandex Cloud Functions Trigger, if present 
+ ObjectStorage []ObjectStorageObservation `json:"objectStorage,omitempty" tf:"object_storage,omitempty"` + + // Timer settings definition for Yandex Cloud Functions Trigger, if present + Timer []TimerObservation `json:"timer,omitempty" tf:"timer,omitempty"` +} + +type TriggerParameters struct { + + // +kubebuilder:validation:Optional + Container []ContainerParameters `json:"container,omitempty" tf:"container,omitempty"` + + // Container Registry settings definition for Yandex Cloud Functions Trigger, if present + // +kubebuilder:validation:Optional + ContainerRegistry []ContainerRegistryParameters `json:"containerRegistry,omitempty" tf:"container_registry,omitempty"` + + // Data Streams settings definition for Yandex Cloud Functions Trigger, if present + // +kubebuilder:validation:Optional + DataStreams []DataStreamsParameters `json:"dataStreams,omitempty" tf:"data_streams,omitempty"` + + // Description of the Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Dead Letter Queue settings definition for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + Dlq []DlqParameters `json:"dlq,omitempty" tf:"dlq,omitempty"` + + // Folder ID for the Yandex Cloud Functions Trigger + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. 
+ // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // Yandex.Cloud Function settings definition for Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + Function []FunctionParameters `json:"function,omitempty" tf:"function,omitempty"` + + // IoT settings definition for Yandex Cloud Functions Trigger, if present. Only one section iot or message_queue or object_storage or timer can be defined. + // +kubebuilder:validation:Optional + Iot []IotParameters `json:"iot,omitempty" tf:"iot,omitempty"` + + // A set of key/value label pairs to assign to the Yandex Cloud Functions Trigger + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // +kubebuilder:validation:Optional + LogGroup []LogGroupParameters `json:"logGroup,omitempty" tf:"log_group,omitempty"` + + // Logging settings definition for Yandex Cloud Functions Trigger, if present + // +kubebuilder:validation:Optional + Logging []LoggingParameters `json:"logging,omitempty" tf:"logging,omitempty"` + + // Logging settings definition for Yandex Cloud Functions Trigger, if present + // +kubebuilder:validation:Optional + Mail []MailParameters `json:"mail,omitempty" tf:"mail,omitempty"` + + // Message Queue settings definition for Yandex Cloud Functions Trigger, if present + // +kubebuilder:validation:Optional + MessageQueue []MessageQueueParameters `json:"messageQueue,omitempty" tf:"message_queue,omitempty"` + + // Yandex Cloud Functions Trigger name used to define trigger + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Object Storage settings definition for Yandex Cloud Functions Trigger, if present + // +kubebuilder:validation:Optional + ObjectStorage []ObjectStorageParameters `json:"objectStorage,omitempty" tf:"object_storage,omitempty"` + + // Timer settings definition for Yandex Cloud Functions 
Trigger, if present + // +kubebuilder:validation:Optional + Timer []TimerParameters `json:"timer,omitempty" tf:"timer,omitempty"` +} + +// TriggerSpec defines the desired state of Trigger +type TriggerSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider TriggerParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider TriggerInitParameters `json:"initProvider,omitempty"` +} + +// TriggerStatus defines the observed state of Trigger. +type TriggerStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider TriggerObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// Trigger is the Schema for the Triggers API. Allows management of a Yandex Cloud Functions Trigger. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type Trigger struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec TriggerSpec `json:"spec"` + Status TriggerStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// TriggerList contains a list of Triggers +type TriggerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Trigger `json:"items"` +} + +// Repository type metadata. +var ( + Trigger_Kind = "Trigger" + Trigger_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Trigger_Kind}.String() + Trigger_KindAPIVersion = Trigger_Kind + "." + CRDGroupVersion.String() + Trigger_GroupVersionKind = CRDGroupVersion.WithKind(Trigger_Kind) +) + +func init() { + SchemeBuilder.Register(&Trigger{}, &TriggerList{}) +} diff --git a/apis/iot/v1alpha1/zz_corebroker_terraformed.go b/apis/iot/v1alpha1/zz_corebroker_terraformed.go new file mode 100755 index 0000000..599a4f9 --- /dev/null +++ b/apis/iot/v1alpha1/zz_corebroker_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this CoreBroker +func (mg *CoreBroker) GetTerraformResourceType() string { + return "yandex_iot_core_broker" +} + +// GetConnectionDetailsMapping for this CoreBroker +func (tr *CoreBroker) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this CoreBroker +func (tr *CoreBroker) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this CoreBroker +func (tr *CoreBroker) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this CoreBroker +func (tr *CoreBroker) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this CoreBroker +func (tr *CoreBroker) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this CoreBroker +func (tr *CoreBroker) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this CoreBroker +func (tr *CoreBroker) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return 
base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this CoreBroker +func (tr *CoreBroker) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this CoreBroker using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *CoreBroker) LateInitialize(attrs []byte) (bool, error) { + params := &CoreBrokerParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *CoreBroker) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/iot/v1alpha1/zz_corebroker_types.go b/apis/iot/v1alpha1/zz_corebroker_types.go new file mode 100755 index 0000000..62ae21b --- /dev/null +++ b/apis/iot/v1alpha1/zz_corebroker_types.go @@ -0,0 +1,219 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CoreBrokerInitParameters struct { + + // A set of certificate's fingerprints for the IoT Core Broker + // +listType=set + Certificates []*string `json:"certificates,omitempty" tf:"certificates,omitempty"` + + // Description of the IoT Core Broker + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Folder ID for the IoT Core Broker + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // A set of key/value label pairs to assign to the IoT Core Broker. 
+ // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Options for logging for IoT Core Broker + LogOptions []LogOptionsInitParameters `json:"logOptions,omitempty" tf:"log_options,omitempty"` + + // IoT Core Broker name used to define broker + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type CoreBrokerObservation struct { + + // A set of certificate's fingerprints for the IoT Core Broker + // +listType=set + Certificates []*string `json:"certificates,omitempty" tf:"certificates,omitempty"` + + // Creation timestamp of the IoT Core Broker + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // Description of the IoT Core Broker + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Folder ID for the IoT Core Broker + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A set of key/value label pairs to assign to the IoT Core Broker. 
+ // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Options for logging for IoT Core Broker + LogOptions []LogOptionsObservation `json:"logOptions,omitempty" tf:"log_options,omitempty"` + + // IoT Core Broker name used to define broker + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type CoreBrokerParameters struct { + + // A set of certificate's fingerprints for the IoT Core Broker + // +kubebuilder:validation:Optional + // +listType=set + Certificates []*string `json:"certificates,omitempty" tf:"certificates,omitempty"` + + // Description of the IoT Core Broker + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Folder ID for the IoT Core Broker + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // A set of key/value label pairs to assign to the IoT Core Broker. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Options for logging for IoT Core Broker + // +kubebuilder:validation:Optional + LogOptions []LogOptionsParameters `json:"logOptions,omitempty" tf:"log_options,omitempty"` + + // IoT Core Broker name used to define broker + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type LogOptionsInitParameters struct { + + // Is logging for broker disabled + Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` + + // Log entries are written to default log group for specified folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Log entries are written to specified log group + LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` + + // Minimum log entry level + MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"` +} + +type LogOptionsObservation struct { + + // Is logging for broker disabled + Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` + + // Log entries are written to default log group for specified folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Log entries are written to specified log group + LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` + + // Minimum log entry level + MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"` +} + +type LogOptionsParameters struct { + + // Is logging for broker disabled + // +kubebuilder:validation:Optional + Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` + + // Log entries are written to default log group for specified folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Log entries are written to specified log group + // +kubebuilder:validation:Optional + LogGroupID 
*string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` + + // Minimum log entry level + // +kubebuilder:validation:Optional + MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"` +} + +// CoreBrokerSpec defines the desired state of CoreBroker +type CoreBrokerSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider CoreBrokerParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider CoreBrokerInitParameters `json:"initProvider,omitempty"` +} + +// CoreBrokerStatus defines the observed state of CoreBroker. +type CoreBrokerStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider CoreBrokerObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// CoreBroker is the Schema for the CoreBrokers API. Allows management of a Yandex.Cloud IoT Core Broker. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type CoreBroker struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec CoreBrokerSpec `json:"spec"` + Status CoreBrokerStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// CoreBrokerList contains a list of CoreBrokers +type CoreBrokerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CoreBroker `json:"items"` +} + +// Repository type metadata. +var ( + CoreBroker_Kind = "CoreBroker" + CoreBroker_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: CoreBroker_Kind}.String() + CoreBroker_KindAPIVersion = CoreBroker_Kind + "." + CRDGroupVersion.String() + CoreBroker_GroupVersionKind = CRDGroupVersion.WithKind(CoreBroker_Kind) +) + +func init() { + SchemeBuilder.Register(&CoreBroker{}, &CoreBrokerList{}) +} diff --git a/apis/iot/v1alpha1/zz_coredevice_terraformed.go b/apis/iot/v1alpha1/zz_coredevice_terraformed.go new file mode 100755 index 0000000..dddc4b5 --- /dev/null +++ b/apis/iot/v1alpha1/zz_coredevice_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this CoreDevice +func (mg *CoreDevice) GetTerraformResourceType() string { + return "yandex_iot_core_device" +} + +// GetConnectionDetailsMapping for this CoreDevice +func (tr *CoreDevice) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"passwords[*]": "passwordsSecretRef[*]"} +} + +// GetObservation of this CoreDevice +func (tr *CoreDevice) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this CoreDevice +func (tr *CoreDevice) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this CoreDevice +func (tr *CoreDevice) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this CoreDevice +func (tr *CoreDevice) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this CoreDevice +func (tr *CoreDevice) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this CoreDevice +func (tr *CoreDevice) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + 
return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this CoreDevice +func (tr *CoreDevice) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this CoreDevice using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *CoreDevice) LateInitialize(attrs []byte) (bool, error) { + params := &CoreDeviceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *CoreDevice) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/iot/v1alpha1/zz_coredevice_types.go b/apis/iot/v1alpha1/zz_coredevice_types.go new file mode 100755 index 0000000..78be7c2 --- /dev/null +++ b/apis/iot/v1alpha1/zz_coredevice_types.go @@ -0,0 +1,149 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CoreDeviceInitParameters struct { + + // A set of key/value aliases pairs to assign to the IoT Core Device + // +mapType=granular + Aliases map[string]*string `json:"aliases,omitempty" tf:"aliases,omitempty"` + + // A set of certificate's fingerprints for the IoT Core Device + // +listType=set + Certificates []*string `json:"certificates,omitempty" tf:"certificates,omitempty"` + + // Description of the IoT Core Device + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // IoT Core Device name used to define device + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + Passwords []*string `json:"passwordsSecretRef,omitempty" tf:"-"` + + // IoT Core Registry ID for the IoT Core Device + RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"` +} + +type CoreDeviceObservation struct { + + // A set of key/value aliases pairs to assign to the IoT Core Device + // +mapType=granular + Aliases map[string]*string `json:"aliases,omitempty" tf:"aliases,omitempty"` + + // A set of certificate's fingerprints for the IoT Core Device + // +listType=set + Certificates []*string `json:"certificates,omitempty" tf:"certificates,omitempty"` + + // Creation timestamp of the IoT Core Device + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + 
	// Description of the IoT Core Device
	Description *string `json:"description,omitempty" tf:"description,omitempty"`

	ID *string `json:"id,omitempty" tf:"id,omitempty"`

	// IoT Core Device name used to define device
	Name *string `json:"name,omitempty" tf:"name,omitempty"`

	// IoT Core Registry ID for the IoT Core Device
	RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"`
}

type CoreDeviceParameters struct {

	// A set of key/value aliases pairs to assign to the IoT Core Device
	// +kubebuilder:validation:Optional
	// +mapType=granular
	Aliases map[string]*string `json:"aliases,omitempty" tf:"aliases,omitempty"`

	// A set of certificate's fingerprints for the IoT Core Device
	// +kubebuilder:validation:Optional
	// +listType=set
	Certificates []*string `json:"certificates,omitempty" tf:"certificates,omitempty"`

	// Description of the IoT Core Device
	// +kubebuilder:validation:Optional
	Description *string `json:"description,omitempty" tf:"description,omitempty"`

	// IoT Core Device name used to define device
	// +kubebuilder:validation:Optional
	Name *string `json:"name,omitempty" tf:"name,omitempty"`

	// A set of passwords's id for the IoT Core Device
	// +kubebuilder:validation:Optional
	PasswordsSecretRef *[]v1.SecretKeySelector `json:"passwordsSecretRef,omitempty" tf:"-"`

	// IoT Core Registry ID for the IoT Core Device
	// +kubebuilder:validation:Optional
	RegistryID *string `json:"registryId,omitempty" tf:"registry_id,omitempty"`
}

// CoreDeviceSpec defines the desired state of CoreDevice
type CoreDeviceSpec struct {
	v1.ResourceSpec `json:",inline"`
	ForProvider     CoreDeviceParameters `json:"forProvider"`
	// THIS IS A BETA FIELD. It will be honored
	// unless the Management Policies feature flag is disabled.
	// InitProvider holds the same fields as ForProvider, with the exception
	// of Identifier and other resource reference fields. The fields that are
	// in InitProvider are merged into ForProvider when the resource is created.
	// The same fields are also added to the terraform ignore_changes hook, to
	// avoid updating them after creation. This is useful for fields that are
	// required on creation, but we do not desire to update them after creation,
	// for example because of an external controller is managing them, like an
	// autoscaler.
	InitProvider CoreDeviceInitParameters `json:"initProvider,omitempty"`
}

// CoreDeviceStatus defines the observed state of CoreDevice.
type CoreDeviceStatus struct {
	v1.ResourceStatus `json:",inline"`
	AtProvider        CoreDeviceObservation `json:"atProvider,omitempty"`
}

// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:storageversion

// CoreDevice is the Schema for the CoreDevices API. Allows management of a Yandex.Cloud IoT Core Device.
// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud}
type CoreDevice struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter"
	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.registryId) || (has(self.initProvider) && has(self.initProvider.registryId))",message="spec.forProvider.registryId is a required parameter"
	Spec   CoreDeviceSpec   `json:"spec"`
	Status CoreDeviceStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true

// CoreDeviceList contains a list of CoreDevices
type CoreDeviceList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []CoreDevice `json:"items"`
}

// Repository type metadata.
var (
	CoreDevice_Kind             = "CoreDevice"
	CoreDevice_GroupKind        = schema.GroupKind{Group: CRDGroup, Kind: CoreDevice_Kind}.String()
	CoreDevice_KindAPIVersion   = CoreDevice_Kind + "." + CRDGroupVersion.String()
	CoreDevice_GroupVersionKind = CRDGroupVersion.WithKind(CoreDevice_Kind)
)

func init() {
	SchemeBuilder.Register(&CoreDevice{}, &CoreDeviceList{})
}

// ---- file boundary: apis/iot/v1alpha1/zz_coreregistry_terraformed.go ----

// Code generated by upjet. DO NOT EDIT.
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this CoreRegistry +func (mg *CoreRegistry) GetTerraformResourceType() string { + return "yandex_iot_core_registry" +} + +// GetConnectionDetailsMapping for this CoreRegistry +func (tr *CoreRegistry) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"passwords[*]": "passwordsSecretRef[*]"} +} + +// GetObservation of this CoreRegistry +func (tr *CoreRegistry) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this CoreRegistry +func (tr *CoreRegistry) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this CoreRegistry +func (tr *CoreRegistry) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this CoreRegistry +func (tr *CoreRegistry) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this CoreRegistry +func (tr *CoreRegistry) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this CoreRegistry +func (tr *CoreRegistry) GetInitParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this CoreRegistry +func (tr *CoreRegistry) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this CoreRegistry using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *CoreRegistry) LateInitialize(attrs []byte) (bool, error) { + params := &CoreRegistryParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *CoreRegistry) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/iot/v1alpha1/zz_coreregistry_types.go b/apis/iot/v1alpha1/zz_coreregistry_types.go new file mode 100755 index 0000000..703d04c --- /dev/null +++ b/apis/iot/v1alpha1/zz_coreregistry_types.go @@ -0,0 +1,225 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CoreRegistryInitParameters struct { + + // A set of certificate's fingerprints for the IoT Core Registry + // +listType=set + Certificates []*string `json:"certificates,omitempty" tf:"certificates,omitempty"` + + // Description of the IoT Core Registry + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Folder ID for the IoT Core Registry + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // A set of key/value label pairs to assign to the IoT Core Registry. 
	// +mapType=granular
	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`

	// Options for logging for IoT Core Registry
	LogOptions []CoreRegistryLogOptionsInitParameters `json:"logOptions,omitempty" tf:"log_options,omitempty"`

	// IoT Core Device name used to define registry
	Name *string `json:"name,omitempty" tf:"name,omitempty"`

	Passwords []*string `json:"passwordsSecretRef,omitempty" tf:"-"`
}

type CoreRegistryLogOptionsInitParameters struct {

	// Is logging for registry disabled
	Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"`

	// Log entries are written to default log group for specified folder
	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`

	// Log entries are written to specified log group
	LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"`

	// Minimum log entry level
	MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"`
}

type CoreRegistryLogOptionsObservation struct {

	// Is logging for registry disabled
	Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"`

	// Log entries are written to default log group for specified folder
	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`

	// Log entries are written to specified log group
	LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"`

	// Minimum log entry level
	MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"`
}

type CoreRegistryLogOptionsParameters struct {

	// Is logging for registry disabled
	// +kubebuilder:validation:Optional
	Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"`

	// Log entries are written to default log group for specified folder
	// +kubebuilder:validation:Optional
	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`

	// Log entries are written to specified log group
	// +kubebuilder:validation:Optional
	LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"`

	// Minimum log entry level
	// +kubebuilder:validation:Optional
	MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"`
}

type CoreRegistryObservation struct {

	// A set of certificate's fingerprints for the IoT Core Registry
	// +listType=set
	Certificates []*string `json:"certificates,omitempty" tf:"certificates,omitempty"`

	// Creation timestamp of the IoT Core Registry
	CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"`

	// Description of the IoT Core Registry
	Description *string `json:"description,omitempty" tf:"description,omitempty"`

	// Folder ID for the IoT Core Registry
	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`

	ID *string `json:"id,omitempty" tf:"id,omitempty"`

	// A set of key/value label pairs to assign to the IoT Core Registry.
	// +mapType=granular
	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`

	// Options for logging for IoT Core Registry
	LogOptions []CoreRegistryLogOptionsObservation `json:"logOptions,omitempty" tf:"log_options,omitempty"`

	// IoT Core Device name used to define registry
	Name *string `json:"name,omitempty" tf:"name,omitempty"`
}

type CoreRegistryParameters struct {

	// A set of certificate's fingerprints for the IoT Core Registry
	// +kubebuilder:validation:Optional
	// +listType=set
	Certificates []*string `json:"certificates,omitempty" tf:"certificates,omitempty"`

	// Description of the IoT Core Registry
	// +kubebuilder:validation:Optional
	Description *string `json:"description,omitempty" tf:"description,omitempty"`

	// Folder ID for the IoT Core Registry
	// +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder
	// +kubebuilder:validation:Optional
	FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"`

	// Reference to a Folder in resourcemanager to populate folderId.
	// +kubebuilder:validation:Optional
	FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"`

	// Selector for a Folder in resourcemanager to populate folderId.
	// +kubebuilder:validation:Optional
	FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"`

	// A set of key/value label pairs to assign to the IoT Core Registry.
	// +kubebuilder:validation:Optional
	// +mapType=granular
	Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"`

	// Options for logging for IoT Core Registry
	// +kubebuilder:validation:Optional
	LogOptions []CoreRegistryLogOptionsParameters `json:"logOptions,omitempty" tf:"log_options,omitempty"`

	// IoT Core Device name used to define registry
	// +kubebuilder:validation:Optional
	Name *string `json:"name,omitempty" tf:"name,omitempty"`

	// A set of passwords's id for the IoT Core Registry
	// +kubebuilder:validation:Optional
	PasswordsSecretRef *[]v1.SecretKeySelector `json:"passwordsSecretRef,omitempty" tf:"-"`
}

// CoreRegistrySpec defines the desired state of CoreRegistry
type CoreRegistrySpec struct {
	v1.ResourceSpec `json:",inline"`
	ForProvider     CoreRegistryParameters `json:"forProvider"`
	// THIS IS A BETA FIELD. It will be honored
	// unless the Management Policies feature flag is disabled.
	// InitProvider holds the same fields as ForProvider, with the exception
	// of Identifier and other resource reference fields. The fields that are
	// in InitProvider are merged into ForProvider when the resource is created.
	// The same fields are also added to the terraform ignore_changes hook, to
	// avoid updating them after creation. This is useful for fields that are
	// required on creation, but we do not desire to update them after creation,
	// for example because of an external controller is managing them, like an
	// autoscaler.
	InitProvider CoreRegistryInitParameters `json:"initProvider,omitempty"`
}

// CoreRegistryStatus defines the observed state of CoreRegistry.
type CoreRegistryStatus struct {
	v1.ResourceStatus `json:",inline"`
	AtProvider        CoreRegistryObservation `json:"atProvider,omitempty"`
}

// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:storageversion

// CoreRegistry is the Schema for the CoreRegistrys API. Allows management of a Yandex.Cloud IoT Core Registry.
// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud}
type CoreRegistry struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter"
	Spec   CoreRegistrySpec   `json:"spec"`
	Status CoreRegistryStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true

// CoreRegistryList contains a list of CoreRegistrys
type CoreRegistryList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []CoreRegistry `json:"items"`
}

// Repository type metadata.
var (
	CoreRegistry_Kind             = "CoreRegistry"
	CoreRegistry_GroupKind        = schema.GroupKind{Group: CRDGroup, Kind: CoreRegistry_Kind}.String()
	CoreRegistry_KindAPIVersion   = CoreRegistry_Kind + "." + CRDGroupVersion.String()
	CoreRegistry_GroupVersionKind = CRDGroupVersion.WithKind(CoreRegistry_Kind)
)

func init() {
	SchemeBuilder.Register(&CoreRegistry{}, &CoreRegistryList{})
}

// ---- file boundary: apis/iot/v1alpha1/zz_generated.conversion_hubs.go ----

// Code generated by upjet. DO NOT EDIT.

package v1alpha1

// Hub marks this type as a conversion hub.
func (tr *CoreBroker) Hub() {}

// Hub marks this type as a conversion hub.
func (tr *CoreDevice) Hub() {}

// Hub marks this type as a conversion hub.
func (tr *CoreRegistry) Hub() {}

// ---- file boundary: apis/iot/v1alpha1/zz_generated.deepcopy.go ----

//go:build !ignore_autogenerated

// Code generated by controller-gen. DO NOT EDIT.

package v1alpha1

import (
	"github.com/crossplane/crossplane-runtime/apis/common/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CoreBroker) DeepCopyInto(out *CoreBroker) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreBroker.
func (in *CoreBroker) DeepCopy() *CoreBroker {
	if in == nil {
		return nil
	}
	out := new(CoreBroker)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CoreBroker) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE: throughout this file the `in, out := &in.X, &out.X` pattern
// deliberately shadows the outer receivers to copy one field at a time.
func (in *CoreBrokerInitParameters) DeepCopyInto(out *CoreBrokerInitParameters) {
	*out = *in
	if in.Certificates != nil {
		in, out := &in.Certificates, &out.Certificates
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.Description != nil {
		in, out := &in.Description, &out.Description
		*out = new(string)
		**out = **in
	}
	if in.FolderID != nil {
		in, out := &in.FolderID, &out.FolderID
		*out = new(string)
		**out = **in
	}
	if in.FolderIDRef != nil {
		in, out := &in.FolderIDRef, &out.FolderIDRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.FolderIDSelector != nil {
		in, out := &in.FolderIDSelector, &out.FolderIDSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.Labels != nil {
		in, out := &in.Labels, &out.Labels
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.LogOptions != nil {
		in, out := &in.LogOptions, &out.LogOptions
		*out = make([]LogOptionsInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreBrokerInitParameters.
func (in *CoreBrokerInitParameters) DeepCopy() *CoreBrokerInitParameters {
	if in == nil {
		return nil
	}
	out := new(CoreBrokerInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CoreBrokerList) DeepCopyInto(out *CoreBrokerList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]CoreBroker, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreBrokerList.
func (in *CoreBrokerList) DeepCopy() *CoreBrokerList {
	if in == nil {
		return nil
	}
	out := new(CoreBrokerList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CoreBrokerList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CoreBrokerObservation) DeepCopyInto(out *CoreBrokerObservation) {
	*out = *in
	if in.Certificates != nil {
		in, out := &in.Certificates, &out.Certificates
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.CreatedAt != nil {
		in, out := &in.CreatedAt, &out.CreatedAt
		*out = new(string)
		**out = **in
	}
	if in.Description != nil {
		in, out := &in.Description, &out.Description
		*out = new(string)
		**out = **in
	}
	if in.FolderID != nil {
		in, out := &in.FolderID, &out.FolderID
		*out = new(string)
		**out = **in
	}
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = new(string)
		**out = **in
	}
	if in.Labels != nil {
		in, out := &in.Labels, &out.Labels
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.LogOptions != nil {
		in, out := &in.LogOptions, &out.LogOptions
		*out = make([]LogOptionsObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreBrokerObservation.
func (in *CoreBrokerObservation) DeepCopy() *CoreBrokerObservation {
	if in == nil {
		return nil
	}
	out := new(CoreBrokerObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CoreBrokerParameters) DeepCopyInto(out *CoreBrokerParameters) {
	*out = *in
	if in.Certificates != nil {
		in, out := &in.Certificates, &out.Certificates
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.Description != nil {
		in, out := &in.Description, &out.Description
		*out = new(string)
		**out = **in
	}
	if in.FolderID != nil {
		in, out := &in.FolderID, &out.FolderID
		*out = new(string)
		**out = **in
	}
	if in.FolderIDRef != nil {
		in, out := &in.FolderIDRef, &out.FolderIDRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.FolderIDSelector != nil {
		in, out := &in.FolderIDSelector, &out.FolderIDSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.Labels != nil {
		in, out := &in.Labels, &out.Labels
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.LogOptions != nil {
		in, out := &in.LogOptions, &out.LogOptions
		*out = make([]LogOptionsParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreBrokerParameters.
func (in *CoreBrokerParameters) DeepCopy() *CoreBrokerParameters {
	if in == nil {
		return nil
	}
	out := new(CoreBrokerParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CoreBrokerSpec) DeepCopyInto(out *CoreBrokerSpec) {
	*out = *in
	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
	in.ForProvider.DeepCopyInto(&out.ForProvider)
	in.InitProvider.DeepCopyInto(&out.InitProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreBrokerSpec.
func (in *CoreBrokerSpec) DeepCopy() *CoreBrokerSpec {
	if in == nil {
		return nil
	}
	out := new(CoreBrokerSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CoreBrokerStatus) DeepCopyInto(out *CoreBrokerStatus) {
	*out = *in
	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
	in.AtProvider.DeepCopyInto(&out.AtProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreBrokerStatus.
func (in *CoreBrokerStatus) DeepCopy() *CoreBrokerStatus {
	if in == nil {
		return nil
	}
	out := new(CoreBrokerStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CoreDevice) DeepCopyInto(out *CoreDevice) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreDevice.
func (in *CoreDevice) DeepCopy() *CoreDevice {
	if in == nil {
		return nil
	}
	out := new(CoreDevice)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CoreDevice) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CoreDeviceInitParameters) DeepCopyInto(out *CoreDeviceInitParameters) {
	*out = *in
	if in.Aliases != nil {
		in, out := &in.Aliases, &out.Aliases
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.Certificates != nil {
		in, out := &in.Certificates, &out.Certificates
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.Description != nil {
		in, out := &in.Description, &out.Description
		*out = new(string)
		**out = **in
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.Passwords != nil {
		in, out := &in.Passwords, &out.Passwords
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.RegistryID != nil {
		in, out := &in.RegistryID, &out.RegistryID
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreDeviceInitParameters.
func (in *CoreDeviceInitParameters) DeepCopy() *CoreDeviceInitParameters {
	if in == nil {
		return nil
	}
	out := new(CoreDeviceInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CoreDeviceList) DeepCopyInto(out *CoreDeviceList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]CoreDevice, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreDeviceList.
func (in *CoreDeviceList) DeepCopy() *CoreDeviceList {
	if in == nil {
		return nil
	}
	out := new(CoreDeviceList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CoreDeviceList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CoreDeviceObservation) DeepCopyInto(out *CoreDeviceObservation) {
	*out = *in
	if in.Aliases != nil {
		in, out := &in.Aliases, &out.Aliases
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.Certificates != nil {
		in, out := &in.Certificates, &out.Certificates
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.CreatedAt != nil {
		in, out := &in.CreatedAt, &out.CreatedAt
		*out = new(string)
		**out = **in
	}
	if in.Description != nil {
		in, out := &in.Description, &out.Description
		*out = new(string)
		**out = **in
	}
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = new(string)
		**out = **in
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.RegistryID != nil {
		in, out := &in.RegistryID, &out.RegistryID
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreDeviceObservation.
func (in *CoreDeviceObservation) DeepCopy() *CoreDeviceObservation {
	if in == nil {
		return nil
	}
	out := new(CoreDeviceObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CoreDeviceParameters) DeepCopyInto(out *CoreDeviceParameters) {
	*out = *in
	if in.Aliases != nil {
		in, out := &in.Aliases, &out.Aliases
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.Certificates != nil {
		in, out := &in.Certificates, &out.Certificates
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.Description != nil {
		in, out := &in.Description, &out.Description
		*out = new(string)
		**out = **in
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.PasswordsSecretRef != nil {
		in, out := &in.PasswordsSecretRef, &out.PasswordsSecretRef
		*out = new([]v1.SecretKeySelector)
		if **in != nil {
			in, out := *in, *out
			*out = make([]v1.SecretKeySelector, len(*in))
			copy(*out, *in)
		}
	}
	if in.RegistryID != nil {
		in, out := &in.RegistryID, &out.RegistryID
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreDeviceParameters.
func (in *CoreDeviceParameters) DeepCopy() *CoreDeviceParameters {
	if in == nil {
		return nil
	}
	out := new(CoreDeviceParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CoreDeviceSpec) DeepCopyInto(out *CoreDeviceSpec) {
	*out = *in
	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
	in.ForProvider.DeepCopyInto(&out.ForProvider)
	in.InitProvider.DeepCopyInto(&out.InitProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreDeviceSpec.
func (in *CoreDeviceSpec) DeepCopy() *CoreDeviceSpec {
	if in == nil {
		return nil
	}
	out := new(CoreDeviceSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CoreDeviceStatus) DeepCopyInto(out *CoreDeviceStatus) {
	*out = *in
	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
	in.AtProvider.DeepCopyInto(&out.AtProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreDeviceStatus.
func (in *CoreDeviceStatus) DeepCopy() *CoreDeviceStatus {
	if in == nil {
		return nil
	}
	out := new(CoreDeviceStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CoreRegistry) DeepCopyInto(out *CoreRegistry) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreRegistry.
+func (in *CoreRegistry) DeepCopy() *CoreRegistry { + if in == nil { + return nil + } + out := new(CoreRegistry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CoreRegistry) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CoreRegistryInitParameters) DeepCopyInto(out *CoreRegistryInitParameters) { + *out = *in + if in.Certificates != nil { + in, out := &in.Certificates, &out.Certificates + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.LogOptions != nil { + in, out := &in.LogOptions, &out.LogOptions + *out = make([]CoreRegistryLogOptionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if 
in.Passwords != nil { + in, out := &in.Passwords, &out.Passwords + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreRegistryInitParameters. +func (in *CoreRegistryInitParameters) DeepCopy() *CoreRegistryInitParameters { + if in == nil { + return nil + } + out := new(CoreRegistryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CoreRegistryList) DeepCopyInto(out *CoreRegistryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CoreRegistry, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreRegistryList. +func (in *CoreRegistryList) DeepCopy() *CoreRegistryList { + if in == nil { + return nil + } + out := new(CoreRegistryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CoreRegistryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CoreRegistryLogOptionsInitParameters) DeepCopyInto(out *CoreRegistryLogOptionsInitParameters) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.LogGroupID != nil { + in, out := &in.LogGroupID, &out.LogGroupID + *out = new(string) + **out = **in + } + if in.MinLevel != nil { + in, out := &in.MinLevel, &out.MinLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreRegistryLogOptionsInitParameters. +func (in *CoreRegistryLogOptionsInitParameters) DeepCopy() *CoreRegistryLogOptionsInitParameters { + if in == nil { + return nil + } + out := new(CoreRegistryLogOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CoreRegistryLogOptionsObservation) DeepCopyInto(out *CoreRegistryLogOptionsObservation) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.LogGroupID != nil { + in, out := &in.LogGroupID, &out.LogGroupID + *out = new(string) + **out = **in + } + if in.MinLevel != nil { + in, out := &in.MinLevel, &out.MinLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreRegistryLogOptionsObservation. 
+func (in *CoreRegistryLogOptionsObservation) DeepCopy() *CoreRegistryLogOptionsObservation { + if in == nil { + return nil + } + out := new(CoreRegistryLogOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CoreRegistryLogOptionsParameters) DeepCopyInto(out *CoreRegistryLogOptionsParameters) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.LogGroupID != nil { + in, out := &in.LogGroupID, &out.LogGroupID + *out = new(string) + **out = **in + } + if in.MinLevel != nil { + in, out := &in.MinLevel, &out.MinLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreRegistryLogOptionsParameters. +func (in *CoreRegistryLogOptionsParameters) DeepCopy() *CoreRegistryLogOptionsParameters { + if in == nil { + return nil + } + out := new(CoreRegistryLogOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CoreRegistryObservation) DeepCopyInto(out *CoreRegistryObservation) { + *out = *in + if in.Certificates != nil { + in, out := &in.Certificates, &out.Certificates + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.LogOptions != nil { + in, out := &in.LogOptions, &out.LogOptions + *out = make([]CoreRegistryLogOptionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreRegistryObservation. +func (in *CoreRegistryObservation) DeepCopy() *CoreRegistryObservation { + if in == nil { + return nil + } + out := new(CoreRegistryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CoreRegistryParameters) DeepCopyInto(out *CoreRegistryParameters) { + *out = *in + if in.Certificates != nil { + in, out := &in.Certificates, &out.Certificates + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.LogOptions != nil { + in, out := &in.LogOptions, &out.LogOptions + *out = make([]CoreRegistryLogOptionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PasswordsSecretRef != nil { + in, out := &in.PasswordsSecretRef, &out.PasswordsSecretRef + *out = new([]v1.SecretKeySelector) + if **in != nil { + in, out := *in, *out + *out = make([]v1.SecretKeySelector, len(*in)) + copy(*out, *in) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreRegistryParameters. 
+func (in *CoreRegistryParameters) DeepCopy() *CoreRegistryParameters { + if in == nil { + return nil + } + out := new(CoreRegistryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CoreRegistrySpec) DeepCopyInto(out *CoreRegistrySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreRegistrySpec. +func (in *CoreRegistrySpec) DeepCopy() *CoreRegistrySpec { + if in == nil { + return nil + } + out := new(CoreRegistrySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CoreRegistryStatus) DeepCopyInto(out *CoreRegistryStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreRegistryStatus. +func (in *CoreRegistryStatus) DeepCopy() *CoreRegistryStatus { + if in == nil { + return nil + } + out := new(CoreRegistryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogOptionsInitParameters) DeepCopyInto(out *LogOptionsInitParameters) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.LogGroupID != nil { + in, out := &in.LogGroupID, &out.LogGroupID + *out = new(string) + **out = **in + } + if in.MinLevel != nil { + in, out := &in.MinLevel, &out.MinLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogOptionsInitParameters. +func (in *LogOptionsInitParameters) DeepCopy() *LogOptionsInitParameters { + if in == nil { + return nil + } + out := new(LogOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogOptionsObservation) DeepCopyInto(out *LogOptionsObservation) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.LogGroupID != nil { + in, out := &in.LogGroupID, &out.LogGroupID + *out = new(string) + **out = **in + } + if in.MinLevel != nil { + in, out := &in.MinLevel, &out.MinLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogOptionsObservation. +func (in *LogOptionsObservation) DeepCopy() *LogOptionsObservation { + if in == nil { + return nil + } + out := new(LogOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogOptionsParameters) DeepCopyInto(out *LogOptionsParameters) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.LogGroupID != nil { + in, out := &in.LogGroupID, &out.LogGroupID + *out = new(string) + **out = **in + } + if in.MinLevel != nil { + in, out := &in.MinLevel, &out.MinLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogOptionsParameters. +func (in *LogOptionsParameters) DeepCopy() *LogOptionsParameters { + if in == nil { + return nil + } + out := new(LogOptionsParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/iot/v1alpha1/zz_generated.managed.go b/apis/iot/v1alpha1/zz_generated.managed.go new file mode 100644 index 0000000..0a6b66c --- /dev/null +++ b/apis/iot/v1alpha1/zz_generated.managed.go @@ -0,0 +1,185 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this CoreBroker. +func (mg *CoreBroker) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this CoreBroker. +func (mg *CoreBroker) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this CoreBroker. +func (mg *CoreBroker) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this CoreBroker. +func (mg *CoreBroker) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this CoreBroker. 
+func (mg *CoreBroker) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this CoreBroker. +func (mg *CoreBroker) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this CoreBroker. +func (mg *CoreBroker) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this CoreBroker. +func (mg *CoreBroker) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this CoreBroker. +func (mg *CoreBroker) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this CoreBroker. +func (mg *CoreBroker) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this CoreBroker. +func (mg *CoreBroker) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this CoreBroker. +func (mg *CoreBroker) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this CoreDevice. +func (mg *CoreDevice) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this CoreDevice. +func (mg *CoreDevice) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this CoreDevice. +func (mg *CoreDevice) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this CoreDevice. 
+func (mg *CoreDevice) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this CoreDevice. +func (mg *CoreDevice) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this CoreDevice. +func (mg *CoreDevice) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this CoreDevice. +func (mg *CoreDevice) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this CoreDevice. +func (mg *CoreDevice) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this CoreDevice. +func (mg *CoreDevice) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this CoreDevice. +func (mg *CoreDevice) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this CoreDevice. +func (mg *CoreDevice) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this CoreDevice. +func (mg *CoreDevice) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this CoreRegistry. +func (mg *CoreRegistry) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this CoreRegistry. +func (mg *CoreRegistry) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this CoreRegistry. 
+func (mg *CoreRegistry) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this CoreRegistry. +func (mg *CoreRegistry) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this CoreRegistry. +func (mg *CoreRegistry) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this CoreRegistry. +func (mg *CoreRegistry) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this CoreRegistry. +func (mg *CoreRegistry) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this CoreRegistry. +func (mg *CoreRegistry) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this CoreRegistry. +func (mg *CoreRegistry) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this CoreRegistry. +func (mg *CoreRegistry) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this CoreRegistry. +func (mg *CoreRegistry) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this CoreRegistry. +func (mg *CoreRegistry) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/iot/v1alpha1/zz_generated.managedlist.go b/apis/iot/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 0000000..34ffeb2 --- /dev/null +++ b/apis/iot/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,32 @@ +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this CoreBrokerList. +func (l *CoreBrokerList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this CoreDeviceList. +func (l *CoreDeviceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this CoreRegistryList. +func (l *CoreRegistryList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/iot/v1alpha1/zz_generated.resolvers.go b/apis/iot/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000..ec47e67 --- /dev/null +++ b/apis/iot/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,95 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this CoreBroker. 
+func (mg *CoreBroker) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this CoreRegistry. 
+func (mg *CoreRegistry) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/iot/v1alpha1/zz_groupversion_info.go b/apis/iot/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..d7367eb --- /dev/null +++ b/apis/iot/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,28 @@ +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=iot.yandex-cloud.upjet.crossplane.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "iot.yandex-cloud.upjet.crossplane.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/mdb/v1alpha1/zz_generated.deepcopy.go b/apis/mdb/v1alpha1/zz_generated.deepcopy.go index 3999a65..36a8134 100644 --- a/apis/mdb/v1alpha1/zz_generated.deepcopy.go +++ b/apis/mdb/v1alpha1/zz_generated.deepcopy.go @@ -12336,6 +12336,11 @@ func (in *MongodbDatabaseInitParameters) DeepCopyInto(out *MongodbDatabaseInitPa *out = new(v1.Selector) (*in).DeepCopyInto(*out) } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbDatabaseInitParameters. @@ -12393,6 +12398,11 @@ func (in *MongodbDatabaseObservation) DeepCopyInto(out *MongodbDatabaseObservati *out = new(string) **out = **in } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbDatabaseObservation. @@ -12423,6 +12433,11 @@ func (in *MongodbDatabaseParameters) DeepCopyInto(out *MongodbDatabaseParameters *out = new(v1.Selector) (*in).DeepCopyInto(*out) } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbDatabaseParameters. 
@@ -12515,6 +12530,11 @@ func (in *MongodbUserInitParameters) DeepCopyInto(out *MongodbUserInitParameters *out = new(v1.Selector) (*in).DeepCopyInto(*out) } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } out.PasswordSecretRef = in.PasswordSecretRef if in.Permission != nil { in, out := &in.Permission, &out.Permission @@ -12580,6 +12600,11 @@ func (in *MongodbUserObservation) DeepCopyInto(out *MongodbUserObservation) { *out = new(string) **out = **in } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } if in.Permission != nil { in, out := &in.Permission, &out.Permission *out = make([]MongodbUserPermissionObservation, len(*in)) @@ -12617,6 +12642,11 @@ func (in *MongodbUserParameters) DeepCopyInto(out *MongodbUserParameters) { *out = new(v1.Selector) (*in).DeepCopyInto(*out) } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } out.PasswordSecretRef = in.PasswordSecretRef if in.Permission != nil { in, out := &in.Permission, &out.Permission diff --git a/apis/mdb/v1alpha1/zz_mongodbdatabase_types.go b/apis/mdb/v1alpha1/zz_mongodbdatabase_types.go index 124ceaf..82cf2d4 100755 --- a/apis/mdb/v1alpha1/zz_mongodbdatabase_types.go +++ b/apis/mdb/v1alpha1/zz_mongodbdatabase_types.go @@ -21,12 +21,18 @@ type MongodbDatabaseInitParameters struct { // Selector for a MongodbCluster to populate clusterId. // +kubebuilder:validation:Optional ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"` + + // The name of the database. + Name *string `json:"name,omitempty" tf:"name,omitempty"` } type MongodbDatabaseObservation struct { ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the database. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` } type MongodbDatabaseParameters struct { @@ -42,6 +48,10 @@ type MongodbDatabaseParameters struct { // Selector for a MongodbCluster to populate clusterId. // +kubebuilder:validation:Optional ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"` + + // The name of the database. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` } // MongodbDatabaseSpec defines the desired state of MongodbDatabase @@ -80,8 +90,9 @@ type MongodbDatabaseStatus struct { type MongodbDatabase struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - Spec MongodbDatabaseSpec `json:"spec"` - Status MongodbDatabaseStatus `json:"status,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec MongodbDatabaseSpec `json:"spec"` + Status MongodbDatabaseStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true diff --git a/apis/mdb/v1alpha1/zz_mongodbuser_types.go b/apis/mdb/v1alpha1/zz_mongodbuser_types.go index 26e90e2..fd10406 100755 --- a/apis/mdb/v1alpha1/zz_mongodbuser_types.go +++ b/apis/mdb/v1alpha1/zz_mongodbuser_types.go @@ -22,6 +22,9 @@ type MongodbUserInitParameters struct { // +kubebuilder:validation:Optional ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"` + // The name of the user. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + // The password of the user. PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` @@ -34,6 +37,9 @@ type MongodbUserObservation struct { ID *string `json:"id,omitempty" tf:"id,omitempty"` + // The name of the user. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + // Set of permissions granted to the user. The structure is documented below. Permission []MongodbUserPermissionObservation `json:"permission,omitempty" tf:"permission,omitempty"` } @@ -52,6 +58,10 @@ type MongodbUserParameters struct { // +kubebuilder:validation:Optional ClusterIDSelector *v1.Selector `json:"clusterIdSelector,omitempty" tf:"-"` + // The name of the user. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + // The password of the user. // +kubebuilder:validation:Optional PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` @@ -129,6 +139,7 @@ type MongodbUserStatus struct { type MongodbUser struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.passwordSecretRef)",message="spec.forProvider.passwordSecretRef is a required parameter" Spec MongodbUserSpec `json:"spec"` Status MongodbUserStatus `json:"status,omitempty"` diff --git a/apis/serverless/v1alpha1/zz_container_terraformed.go b/apis/serverless/v1alpha1/zz_container_terraformed.go new file mode 100755 index 0000000..8f3000f --- /dev/null +++ b/apis/serverless/v1alpha1/zz_container_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Container +func (mg *Container) GetTerraformResourceType() string { + return "yandex_serverless_container" +} + +// GetConnectionDetailsMapping for this Container +func (tr *Container) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Container +func (tr *Container) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Container +func (tr *Container) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Container +func (tr *Container) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Container +func (tr *Container) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Container +func (tr *Container) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Container +func (tr *Container) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, 
json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this Container +func (tr *Container) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Container using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Container) LateInitialize(attrs []byte) (bool, error) { + params := &ContainerParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Container) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/serverless/v1alpha1/zz_container_types.go b/apis/serverless/v1alpha1/zz_container_types.go new file mode 100755 index 0000000..ecd3242 --- /dev/null +++ b/apis/serverless/v1alpha1/zz_container_types.go @@ -0,0 +1,513 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ConnectivityInitParameters struct { + + // Network the revision will have access to + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` +} + +type ConnectivityObservation struct { + + // Network the revision will have access to + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` +} + +type ConnectivityParameters struct { + + // Network the revision will have access to + // +kubebuilder:validation:Optional + NetworkID *string `json:"networkId" tf:"network_id,omitempty"` +} + +type ContainerInitParameters struct { + + // Concurrency of Yandex Cloud Serverless Container + Concurrency *float64 `json:"concurrency,omitempty" tf:"concurrency,omitempty"` + + // Network access. 
If specified the revision will be attached to specified network + Connectivity []ConnectivityInitParameters `json:"connectivity,omitempty" tf:"connectivity,omitempty"` + + // Core fraction (0...100) of the Yandex Cloud Serverless Container + CoreFraction *float64 `json:"coreFraction,omitempty" tf:"core_fraction,omitempty"` + + Cores *float64 `json:"cores,omitempty" tf:"cores,omitempty"` + + // Description of the Yandex Cloud Serverless Container + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Execution timeout in seconds (duration format) for Yandex Cloud Serverless Container + ExecutionTimeout *string `json:"executionTimeout,omitempty" tf:"execution_timeout,omitempty"` + + // Folder ID for the Yandex Cloud Serverless Container + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. 
+ // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // Revision deployment image for Yandex Cloud Serverless Container + Image []ImageInitParameters `json:"image,omitempty" tf:"image,omitempty"` + + // A set of key/value label pairs to assign to the Yandex Cloud Serverless Container + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Options for logging from Yandex Cloud Serverless Container + LogOptions []LogOptionsInitParameters `json:"logOptions,omitempty" tf:"log_options,omitempty"` + + // Memory in megabytes (aligned to 128MB) for Yandex Cloud Serverless Container + // Container memory in megabytes, should be aligned to 128 + Memory *float64 `json:"memory,omitempty" tf:"memory,omitempty"` + + // Yandex Cloud Serverless Container name + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Secrets for Yandex Cloud Serverless Container + Secrets []SecretsInitParameters `json:"secrets,omitempty" tf:"secrets,omitempty"` + + // Service account ID for Yandex Cloud Serverless Container + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + + // Storage mounts for Yandex Cloud Serverless Container + StorageMounts []StorageMountsInitParameters `json:"storageMounts,omitempty" tf:"storage_mounts,omitempty"` +} + +type ContainerObservation struct { + + // Concurrency of Yandex Cloud Serverless Container + Concurrency *float64 `json:"concurrency,omitempty" tf:"concurrency,omitempty"` + + // Network access. 
If specified the revision will be attached to specified network + Connectivity []ConnectivityObservation `json:"connectivity,omitempty" tf:"connectivity,omitempty"` + + // Core fraction (0...100) of the Yandex Cloud Serverless Container + CoreFraction *float64 `json:"coreFraction,omitempty" tf:"core_fraction,omitempty"` + + Cores *float64 `json:"cores,omitempty" tf:"cores,omitempty"` + + // Creation timestamp of the Yandex Cloud Serverless Container + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // Description of the Yandex Cloud Serverless Container + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Execution timeout in seconds (duration format) for Yandex Cloud Serverless Container + ExecutionTimeout *string `json:"executionTimeout,omitempty" tf:"execution_timeout,omitempty"` + + // Folder ID for the Yandex Cloud Serverless Container + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Secret's id. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Revision deployment image for Yandex Cloud Serverless Container + Image []ImageObservation `json:"image,omitempty" tf:"image,omitempty"` + + // A set of key/value label pairs to assign to the Yandex Cloud Serverless Container + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Options for logging from Yandex Cloud Serverless Container + LogOptions []LogOptionsObservation `json:"logOptions,omitempty" tf:"log_options,omitempty"` + + // Memory in megabytes (aligned to 128MB) for Yandex Cloud Serverless Container + // Container memory in megabytes, should be aligned to 128 + Memory *float64 `json:"memory,omitempty" tf:"memory,omitempty"` + + // Yandex Cloud Serverless Container name + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Last revision ID of the Yandex Cloud Serverless Container + RevisionID *string `json:"revisionId,omitempty" tf:"revision_id,omitempty"` + + // Secrets for Yandex Cloud Serverless Container + Secrets []SecretsObservation `json:"secrets,omitempty" tf:"secrets,omitempty"` + + // Service account ID for Yandex Cloud Serverless Container + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + + // Storage mounts for Yandex Cloud Serverless Container + StorageMounts []StorageMountsObservation `json:"storageMounts,omitempty" tf:"storage_mounts,omitempty"` + + // Invoke URL for the Yandex Cloud Serverless Container + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type ContainerParameters struct { + + // Concurrency of Yandex Cloud Serverless Container + // +kubebuilder:validation:Optional + Concurrency *float64 `json:"concurrency,omitempty" tf:"concurrency,omitempty"` + + // Network access. 
If specified the revision will be attached to specified network + // +kubebuilder:validation:Optional + Connectivity []ConnectivityParameters `json:"connectivity,omitempty" tf:"connectivity,omitempty"` + + // Core fraction (0...100) of the Yandex Cloud Serverless Container + // +kubebuilder:validation:Optional + CoreFraction *float64 `json:"coreFraction,omitempty" tf:"core_fraction,omitempty"` + + // +kubebuilder:validation:Optional + Cores *float64 `json:"cores,omitempty" tf:"cores,omitempty"` + + // Description of the Yandex Cloud Serverless Container + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Execution timeout in seconds (duration format) for Yandex Cloud Serverless Container + // +kubebuilder:validation:Optional + ExecutionTimeout *string `json:"executionTimeout,omitempty" tf:"execution_timeout,omitempty"` + + // Folder ID for the Yandex Cloud Serverless Container + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. 
+ // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // Revision deployment image for Yandex Cloud Serverless Container + // +kubebuilder:validation:Optional + Image []ImageParameters `json:"image,omitempty" tf:"image,omitempty"` + + // A set of key/value label pairs to assign to the Yandex Cloud Serverless Container + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Options for logging from Yandex Cloud Serverless Container + // +kubebuilder:validation:Optional + LogOptions []LogOptionsParameters `json:"logOptions,omitempty" tf:"log_options,omitempty"` + + // Memory in megabytes (aligned to 128MB) for Yandex Cloud Serverless Container + // Container memory in megabytes, should be aligned to 128 + // +kubebuilder:validation:Optional + Memory *float64 `json:"memory,omitempty" tf:"memory,omitempty"` + + // Yandex Cloud Serverless Container name + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Secrets for Yandex Cloud Serverless Container + // +kubebuilder:validation:Optional + Secrets []SecretsParameters `json:"secrets,omitempty" tf:"secrets,omitempty"` + + // Service account ID for Yandex Cloud Serverless Container + // +kubebuilder:validation:Optional + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + + // Storage mounts for Yandex Cloud Serverless Container + // +kubebuilder:validation:Optional + StorageMounts []StorageMountsParameters `json:"storageMounts,omitempty" tf:"storage_mounts,omitempty"` +} + +type ImageInitParameters struct { + + // List of arguments for Yandex Cloud Serverless Container + Args []*string `json:"args,omitempty" tf:"args,omitempty"` + + // List of commands for Yandex Cloud Serverless Container + Command []*string `json:"command,omitempty" tf:"command,omitempty"` + + // Digest of 
image that will be deployed as Yandex Cloud Serverless Container. + // If presented, should be equal to digest that will be resolved at server side by URL. + // Container will be updated on digest change even if image.0.url stays the same. + // If field not specified then its value will be computed. + Digest *string `json:"digest,omitempty" tf:"digest,omitempty"` + + // A set of key/value environment variable pairs for Yandex Cloud Serverless Container + // +mapType=granular + Environment map[string]*string `json:"environment,omitempty" tf:"environment,omitempty"` + + // Invoke URL for the Yandex Cloud Serverless Container + URL *string `json:"url,omitempty" tf:"url,omitempty"` + + // Working directory for Yandex Cloud Serverless Container + WorkDir *string `json:"workDir,omitempty" tf:"work_dir,omitempty"` +} + +type ImageObservation struct { + + // List of arguments for Yandex Cloud Serverless Container + Args []*string `json:"args,omitempty" tf:"args,omitempty"` + + // List of commands for Yandex Cloud Serverless Container + Command []*string `json:"command,omitempty" tf:"command,omitempty"` + + // Digest of image that will be deployed as Yandex Cloud Serverless Container. + // If presented, should be equal to digest that will be resolved at server side by URL. + // Container will be updated on digest change even if image.0.url stays the same. + // If field not specified then its value will be computed. 
+ Digest *string `json:"digest,omitempty" tf:"digest,omitempty"` + + // A set of key/value environment variable pairs for Yandex Cloud Serverless Container + // +mapType=granular + Environment map[string]*string `json:"environment,omitempty" tf:"environment,omitempty"` + + // Invoke URL for the Yandex Cloud Serverless Container + URL *string `json:"url,omitempty" tf:"url,omitempty"` + + // Working directory for Yandex Cloud Serverless Container + WorkDir *string `json:"workDir,omitempty" tf:"work_dir,omitempty"` +} + +type ImageParameters struct { + + // List of arguments for Yandex Cloud Serverless Container + // +kubebuilder:validation:Optional + Args []*string `json:"args,omitempty" tf:"args,omitempty"` + + // List of commands for Yandex Cloud Serverless Container + // +kubebuilder:validation:Optional + Command []*string `json:"command,omitempty" tf:"command,omitempty"` + + // Digest of image that will be deployed as Yandex Cloud Serverless Container. + // If presented, should be equal to digest that will be resolved at server side by URL. + // Container will be updated on digest change even if image.0.url stays the same. + // If field not specified then its value will be computed. 
+ // +kubebuilder:validation:Optional + Digest *string `json:"digest,omitempty" tf:"digest,omitempty"` + + // A set of key/value environment variable pairs for Yandex Cloud Serverless Container + // +kubebuilder:validation:Optional + // +mapType=granular + Environment map[string]*string `json:"environment,omitempty" tf:"environment,omitempty"` + + // Invoke URL for the Yandex Cloud Serverless Container + // +kubebuilder:validation:Optional + URL *string `json:"url" tf:"url,omitempty"` + + // Working directory for Yandex Cloud Serverless Container + // +kubebuilder:validation:Optional + WorkDir *string `json:"workDir,omitempty" tf:"work_dir,omitempty"` +} + +type LogOptionsInitParameters struct { + + // Is logging from container disabled + Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` + + // Log entries are written to default log group for specified folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Log entries are written to specified log group + LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` + + // Minimum log entry level + MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"` +} + +type LogOptionsObservation struct { + + // Is logging from container disabled + Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` + + // Log entries are written to default log group for specified folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Log entries are written to specified log group + LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` + + // Minimum log entry level + MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"` +} + +type LogOptionsParameters struct { + + // Is logging from container disabled + // +kubebuilder:validation:Optional + Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` + + // Log entries are written to default log group for specified folder + // 
+kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Log entries are written to specified log group + // +kubebuilder:validation:Optional + LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` + + // Minimum log entry level + // +kubebuilder:validation:Optional + MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"` +} + +type SecretsInitParameters struct { + + // Container's environment variable in which secret's value will be stored. + EnvironmentVariable *string `json:"environmentVariable,omitempty" tf:"environment_variable,omitempty"` + + // Secret's id. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Secret's entries key which value will be stored in environment variable. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Secret's version id. + VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"` +} + +type SecretsObservation struct { + + // Container's environment variable in which secret's value will be stored. + EnvironmentVariable *string `json:"environmentVariable,omitempty" tf:"environment_variable,omitempty"` + + // Secret's id. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Secret's entries key which value will be stored in environment variable. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Secret's version id. + VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"` +} + +type SecretsParameters struct { + + // Container's environment variable in which secret's value will be stored. + // +kubebuilder:validation:Optional + EnvironmentVariable *string `json:"environmentVariable" tf:"environment_variable,omitempty"` + + // Secret's id. + // +kubebuilder:validation:Optional + ID *string `json:"id" tf:"id,omitempty"` + + // Secret's entries key which value will be stored in environment variable. 
+ // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // Secret's version id. + // +kubebuilder:validation:Optional + VersionID *string `json:"versionId" tf:"version_id,omitempty"` +} + +type StorageMountsInitParameters struct { + + // Name of the mounting bucket. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Path inside the container to access the directory in which the bucket is mounted. + MountPointPath *string `json:"mountPointPath,omitempty" tf:"mount_point_path,omitempty"` + + // Prefix within the bucket. If you leave this field empty, the entire bucket will be mounted. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Mount the bucket in read-only mode. + ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` +} + +type StorageMountsObservation struct { + + // Name of the mounting bucket. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Path inside the container to access the directory in which the bucket is mounted. + MountPointPath *string `json:"mountPointPath,omitempty" tf:"mount_point_path,omitempty"` + + // Prefix within the bucket. If you leave this field empty, the entire bucket will be mounted. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Mount the bucket in read-only mode. + ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` +} + +type StorageMountsParameters struct { + + // Name of the mounting bucket. + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket" tf:"bucket,omitempty"` + + // Path inside the container to access the directory in which the bucket is mounted. + // +kubebuilder:validation:Optional + MountPointPath *string `json:"mountPointPath" tf:"mount_point_path,omitempty"` + + // Prefix within the bucket. If you leave this field empty, the entire bucket will be mounted. 
+ // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Mount the bucket in read-only mode. + // +kubebuilder:validation:Optional + ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` +} + +// ContainerSpec defines the desired state of Container +type ContainerSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ContainerParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ContainerInitParameters `json:"initProvider,omitempty"` +} + +// ContainerStatus defines the observed state of Container. +type ContainerStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ContainerObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// Container is the Schema for the Containers API. Allows management of a Yandex Cloud Serverless Container. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type Container struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.image) || (has(self.initProvider) && has(self.initProvider.image))",message="spec.forProvider.image is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.memory) || (has(self.initProvider) && has(self.initProvider.memory))",message="spec.forProvider.memory is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec ContainerSpec `json:"spec"` + Status ContainerStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ContainerList contains a list of Containers +type ContainerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Container `json:"items"` +} + +// Repository type metadata. 
+var ( + Container_Kind = "Container" + Container_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Container_Kind}.String() + Container_KindAPIVersion = Container_Kind + "." + CRDGroupVersion.String() + Container_GroupVersionKind = CRDGroupVersion.WithKind(Container_Kind) +) + +func init() { + SchemeBuilder.Register(&Container{}, &ContainerList{}) +} diff --git a/apis/serverless/v1alpha1/zz_containeriambinding_terraformed.go b/apis/serverless/v1alpha1/zz_containeriambinding_terraformed.go new file mode 100755 index 0000000..0c9500d --- /dev/null +++ b/apis/serverless/v1alpha1/zz_containeriambinding_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ContainerIAMBinding +func (mg *ContainerIAMBinding) GetTerraformResourceType() string { + return "yandex_serverless_container_iam_binding" +} + +// GetConnectionDetailsMapping for this ContainerIAMBinding +func (tr *ContainerIAMBinding) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ContainerIAMBinding +func (tr *ContainerIAMBinding) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ContainerIAMBinding +func (tr *ContainerIAMBinding) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ContainerIAMBinding +func (tr *ContainerIAMBinding) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return 
*tr.Status.AtProvider.ID +} + +// GetParameters of this ContainerIAMBinding +func (tr *ContainerIAMBinding) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ContainerIAMBinding +func (tr *ContainerIAMBinding) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ContainerIAMBinding +func (tr *ContainerIAMBinding) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this ContainerIAMBinding +func (tr *ContainerIAMBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields.
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ContainerIAMBinding using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ContainerIAMBinding) LateInitialize(attrs []byte) (bool, error) { + params := &ContainerIAMBindingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ContainerIAMBinding) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/serverless/v1alpha1/zz_containeriambinding_types.go b/apis/serverless/v1alpha1/zz_containeriambinding_types.go new file mode 100755 index 0000000..8ff5dd3 --- /dev/null +++ b/apis/serverless/v1alpha1/zz_containeriambinding_types.go @@ -0,0 +1,128 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ContainerIAMBindingInitParameters struct { + + // The Yandex Serverless Container ID to apply a binding to. + ContainerID *string `json:"containerId,omitempty" tf:"container_id,omitempty"` + + // Identities that will be granted the privilege in role.
+ // Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be applied. + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` +} + +type ContainerIAMBindingObservation struct { + + // The Yandex Serverless Container ID to apply a binding to. + ContainerID *string `json:"containerId,omitempty" tf:"container_id,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Identities that will be granted the privilege in role. + // Each entry can have one of the following values: + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be applied. + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` +} + +type ContainerIAMBindingParameters struct { + + // The Yandex Serverless Container ID to apply a binding to. + // +kubebuilder:validation:Optional + ContainerID *string `json:"containerId,omitempty" tf:"container_id,omitempty"` + + // Identities that will be granted the privilege in role. + // Each entry can have one of the following values: + // +kubebuilder:validation:Optional + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // The role that should be applied. + // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` + + // +kubebuilder:validation:Optional + SleepAfter *float64 `json:"sleepAfter,omitempty" tf:"sleep_after,omitempty"` +} + +// ContainerIAMBindingSpec defines the desired state of ContainerIAMBinding +type ContainerIAMBindingSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ContainerIAMBindingParameters `json:"forProvider"` + // THIS IS A BETA FIELD. 
It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ContainerIAMBindingInitParameters `json:"initProvider,omitempty"` +} + +// ContainerIAMBindingStatus defines the observed state of ContainerIAMBinding. +type ContainerIAMBindingStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ContainerIAMBindingObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// ContainerIAMBinding is the Schema for the ContainerIAMBindings API. 
Allows management of a single IAM binding for a +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type ContainerIAMBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.containerId) || (has(self.initProvider) && has(self.initProvider.containerId))",message="spec.forProvider.containerId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.members) || (has(self.initProvider) && has(self.initProvider.members))",message="spec.forProvider.members is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" + Spec ContainerIAMBindingSpec `json:"spec"` + Status ContainerIAMBindingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ContainerIAMBindingList contains a list of ContainerIAMBindings +type ContainerIAMBindingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items 
[]ContainerIAMBinding `json:"items"` +} + +// Repository type metadata. +var ( + ContainerIAMBinding_Kind = "ContainerIAMBinding" + ContainerIAMBinding_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ContainerIAMBinding_Kind}.String() + ContainerIAMBinding_KindAPIVersion = ContainerIAMBinding_Kind + "." + CRDGroupVersion.String() + ContainerIAMBinding_GroupVersionKind = CRDGroupVersion.WithKind(ContainerIAMBinding_Kind) +) + +func init() { + SchemeBuilder.Register(&ContainerIAMBinding{}, &ContainerIAMBindingList{}) +} diff --git a/apis/serverless/v1alpha1/zz_generated.conversion_hubs.go b/apis/serverless/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..e99fddc --- /dev/null +++ b/apis/serverless/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,9 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +// Hub marks this type as a conversion hub. +func (tr *Container) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *ContainerIAMBinding) Hub() {} diff --git a/apis/serverless/v1alpha1/zz_generated.deepcopy.go b/apis/serverless/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..2a4734d --- /dev/null +++ b/apis/serverless/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,1278 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectivityInitParameters) DeepCopyInto(out *ConnectivityInitParameters) { + *out = *in + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectivityInitParameters. 
+func (in *ConnectivityInitParameters) DeepCopy() *ConnectivityInitParameters { + if in == nil { + return nil + } + out := new(ConnectivityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectivityObservation) DeepCopyInto(out *ConnectivityObservation) { + *out = *in + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectivityObservation. +func (in *ConnectivityObservation) DeepCopy() *ConnectivityObservation { + if in == nil { + return nil + } + out := new(ConnectivityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectivityParameters) DeepCopyInto(out *ConnectivityParameters) { + *out = *in + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectivityParameters. +func (in *ConnectivityParameters) DeepCopy() *ConnectivityParameters { + if in == nil { + return nil + } + out := new(ConnectivityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Container) DeepCopyInto(out *Container) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Container. 
+func (in *Container) DeepCopy() *Container { + if in == nil { + return nil + } + out := new(Container) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Container) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerIAMBinding) DeepCopyInto(out *ContainerIAMBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerIAMBinding. +func (in *ContainerIAMBinding) DeepCopy() *ContainerIAMBinding { + if in == nil { + return nil + } + out := new(ContainerIAMBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ContainerIAMBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerIAMBindingInitParameters) DeepCopyInto(out *ContainerIAMBindingInitParameters) { + *out = *in + if in.ContainerID != nil { + in, out := &in.ContainerID, &out.ContainerID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerIAMBindingInitParameters. +func (in *ContainerIAMBindingInitParameters) DeepCopy() *ContainerIAMBindingInitParameters { + if in == nil { + return nil + } + out := new(ContainerIAMBindingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerIAMBindingList) DeepCopyInto(out *ContainerIAMBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ContainerIAMBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerIAMBindingList. +func (in *ContainerIAMBindingList) DeepCopy() *ContainerIAMBindingList { + if in == nil { + return nil + } + out := new(ContainerIAMBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ContainerIAMBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerIAMBindingObservation) DeepCopyInto(out *ContainerIAMBindingObservation) { + *out = *in + if in.ContainerID != nil { + in, out := &in.ContainerID, &out.ContainerID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerIAMBindingObservation. +func (in *ContainerIAMBindingObservation) DeepCopy() *ContainerIAMBindingObservation { + if in == nil { + return nil + } + out := new(ContainerIAMBindingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerIAMBindingParameters) DeepCopyInto(out *ContainerIAMBindingParameters) { + *out = *in + if in.ContainerID != nil { + in, out := &in.ContainerID, &out.ContainerID + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.SleepAfter != nil { + in, out := &in.SleepAfter, &out.SleepAfter + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerIAMBindingParameters. +func (in *ContainerIAMBindingParameters) DeepCopy() *ContainerIAMBindingParameters { + if in == nil { + return nil + } + out := new(ContainerIAMBindingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerIAMBindingSpec) DeepCopyInto(out *ContainerIAMBindingSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerIAMBindingSpec. +func (in *ContainerIAMBindingSpec) DeepCopy() *ContainerIAMBindingSpec { + if in == nil { + return nil + } + out := new(ContainerIAMBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerIAMBindingStatus) DeepCopyInto(out *ContainerIAMBindingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerIAMBindingStatus. +func (in *ContainerIAMBindingStatus) DeepCopy() *ContainerIAMBindingStatus { + if in == nil { + return nil + } + out := new(ContainerIAMBindingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerInitParameters) DeepCopyInto(out *ContainerInitParameters) { + *out = *in + if in.Concurrency != nil { + in, out := &in.Concurrency, &out.Concurrency + *out = new(float64) + **out = **in + } + if in.Connectivity != nil { + in, out := &in.Connectivity, &out.Connectivity + *out = make([]ConnectivityInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CoreFraction != nil { + in, out := &in.CoreFraction, &out.CoreFraction + *out = new(float64) + **out = **in + } + if in.Cores != nil { + in, out := &in.Cores, &out.Cores + *out = new(float64) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExecutionTimeout != nil { + in, out := &in.ExecutionTimeout, &out.ExecutionTimeout + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = make([]ImageInitParameters, len(*in)) 
+ for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.LogOptions != nil { + in, out := &in.LogOptions, &out.LogOptions + *out = make([]LogOptionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]SecretsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.StorageMounts != nil { + in, out := &in.StorageMounts, &out.StorageMounts + *out = make([]StorageMountsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerInitParameters. +func (in *ContainerInitParameters) DeepCopy() *ContainerInitParameters { + if in == nil { + return nil + } + out := new(ContainerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerList) DeepCopyInto(out *ContainerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerList. +func (in *ContainerList) DeepCopy() *ContainerList { + if in == nil { + return nil + } + out := new(ContainerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ContainerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerObservation) DeepCopyInto(out *ContainerObservation) { + *out = *in + if in.Concurrency != nil { + in, out := &in.Concurrency, &out.Concurrency + *out = new(float64) + **out = **in + } + if in.Connectivity != nil { + in, out := &in.Connectivity, &out.Connectivity + *out = make([]ConnectivityObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CoreFraction != nil { + in, out := &in.CoreFraction, &out.CoreFraction + *out = new(float64) + **out = **in + } + if in.Cores != nil { + in, out := &in.Cores, &out.Cores + *out = new(float64) + **out = **in + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExecutionTimeout != nil { + in, out := &in.ExecutionTimeout, &out.ExecutionTimeout + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + 
**out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = make([]ImageObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.LogOptions != nil { + in, out := &in.LogOptions, &out.LogOptions + *out = make([]LogOptionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RevisionID != nil { + in, out := &in.RevisionID, &out.RevisionID + *out = new(string) + **out = **in + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]SecretsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.StorageMounts != nil { + in, out := &in.StorageMounts, &out.StorageMounts + *out = make([]StorageMountsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerObservation. 
+func (in *ContainerObservation) DeepCopy() *ContainerObservation { + if in == nil { + return nil + } + out := new(ContainerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerParameters) DeepCopyInto(out *ContainerParameters) { + *out = *in + if in.Concurrency != nil { + in, out := &in.Concurrency, &out.Concurrency + *out = new(float64) + **out = **in + } + if in.Connectivity != nil { + in, out := &in.Connectivity, &out.Connectivity + *out = make([]ConnectivityParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CoreFraction != nil { + in, out := &in.CoreFraction, &out.CoreFraction + *out = new(float64) + **out = **in + } + if in.Cores != nil { + in, out := &in.Cores, &out.Cores + *out = new(float64) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExecutionTimeout != nil { + in, out := &in.ExecutionTimeout, &out.ExecutionTimeout + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = make([]ImageParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + 
**out = **in + } + (*out)[key] = outVal + } + } + if in.LogOptions != nil { + in, out := &in.LogOptions, &out.LogOptions + *out = make([]LogOptionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]SecretsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.StorageMounts != nil { + in, out := &in.StorageMounts, &out.StorageMounts + *out = make([]StorageMountsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerParameters. +func (in *ContainerParameters) DeepCopy() *ContainerParameters { + if in == nil { + return nil + } + out := new(ContainerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerSpec) DeepCopyInto(out *ContainerSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerSpec. +func (in *ContainerSpec) DeepCopy() *ContainerSpec { + if in == nil { + return nil + } + out := new(ContainerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStatus. +func (in *ContainerStatus) DeepCopy() *ContainerStatus { + if in == nil { + return nil + } + out := new(ContainerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageInitParameters) DeepCopyInto(out *ImageInitParameters) { + *out = *in + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Digest != nil { + in, out := &in.Digest, &out.Digest + *out = new(string) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.WorkDir != nil { + in, out := &in.WorkDir, &out.WorkDir + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageInitParameters. 
+func (in *ImageInitParameters) DeepCopy() *ImageInitParameters { + if in == nil { + return nil + } + out := new(ImageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageObservation) DeepCopyInto(out *ImageObservation) { + *out = *in + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Digest != nil { + in, out := &in.Digest, &out.Digest + *out = new(string) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.WorkDir != nil { + in, out := &in.WorkDir, &out.WorkDir + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageObservation. +func (in *ImageObservation) DeepCopy() *ImageObservation { + if in == nil { + return nil + } + out := new(ImageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageParameters) DeepCopyInto(out *ImageParameters) { + *out = *in + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Digest != nil { + in, out := &in.Digest, &out.Digest + *out = new(string) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.WorkDir != nil { + in, out := &in.WorkDir, &out.WorkDir + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageParameters. +func (in *ImageParameters) DeepCopy() *ImageParameters { + if in == nil { + return nil + } + out := new(ImageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogOptionsInitParameters) DeepCopyInto(out *LogOptionsInitParameters) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.LogGroupID != nil { + in, out := &in.LogGroupID, &out.LogGroupID + *out = new(string) + **out = **in + } + if in.MinLevel != nil { + in, out := &in.MinLevel, &out.MinLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogOptionsInitParameters. +func (in *LogOptionsInitParameters) DeepCopy() *LogOptionsInitParameters { + if in == nil { + return nil + } + out := new(LogOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogOptionsObservation) DeepCopyInto(out *LogOptionsObservation) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.LogGroupID != nil { + in, out := &in.LogGroupID, &out.LogGroupID + *out = new(string) + **out = **in + } + if in.MinLevel != nil { + in, out := &in.MinLevel, &out.MinLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogOptionsObservation. +func (in *LogOptionsObservation) DeepCopy() *LogOptionsObservation { + if in == nil { + return nil + } + out := new(LogOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogOptionsParameters) DeepCopyInto(out *LogOptionsParameters) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.LogGroupID != nil { + in, out := &in.LogGroupID, &out.LogGroupID + *out = new(string) + **out = **in + } + if in.MinLevel != nil { + in, out := &in.MinLevel, &out.MinLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogOptionsParameters. +func (in *LogOptionsParameters) DeepCopy() *LogOptionsParameters { + if in == nil { + return nil + } + out := new(LogOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretsInitParameters) DeepCopyInto(out *SecretsInitParameters) { + *out = *in + if in.EnvironmentVariable != nil { + in, out := &in.EnvironmentVariable, &out.EnvironmentVariable + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretsInitParameters. +func (in *SecretsInitParameters) DeepCopy() *SecretsInitParameters { + if in == nil { + return nil + } + out := new(SecretsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecretsObservation) DeepCopyInto(out *SecretsObservation) { + *out = *in + if in.EnvironmentVariable != nil { + in, out := &in.EnvironmentVariable, &out.EnvironmentVariable + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretsObservation. +func (in *SecretsObservation) DeepCopy() *SecretsObservation { + if in == nil { + return nil + } + out := new(SecretsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretsParameters) DeepCopyInto(out *SecretsParameters) { + *out = *in + if in.EnvironmentVariable != nil { + in, out := &in.EnvironmentVariable, &out.EnvironmentVariable + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretsParameters. +func (in *SecretsParameters) DeepCopy() *SecretsParameters { + if in == nil { + return nil + } + out := new(SecretsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageMountsInitParameters) DeepCopyInto(out *StorageMountsInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.MountPointPath != nil { + in, out := &in.MountPointPath, &out.MountPointPath + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageMountsInitParameters. +func (in *StorageMountsInitParameters) DeepCopy() *StorageMountsInitParameters { + if in == nil { + return nil + } + out := new(StorageMountsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageMountsObservation) DeepCopyInto(out *StorageMountsObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.MountPointPath != nil { + in, out := &in.MountPointPath, &out.MountPointPath + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageMountsObservation. +func (in *StorageMountsObservation) DeepCopy() *StorageMountsObservation { + if in == nil { + return nil + } + out := new(StorageMountsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageMountsParameters) DeepCopyInto(out *StorageMountsParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.MountPointPath != nil { + in, out := &in.MountPointPath, &out.MountPointPath + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageMountsParameters. +func (in *StorageMountsParameters) DeepCopy() *StorageMountsParameters { + if in == nil { + return nil + } + out := new(StorageMountsParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/serverless/v1alpha1/zz_generated.managed.go b/apis/serverless/v1alpha1/zz_generated.managed.go new file mode 100644 index 0000000..426b6d4 --- /dev/null +++ b/apis/serverless/v1alpha1/zz_generated.managed.go @@ -0,0 +1,125 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Container. +func (mg *Container) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Container. +func (mg *Container) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Container. +func (mg *Container) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Container. +func (mg *Container) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Container. 
// GetPublishConnectionDetailsTo of this Container.
func (mg *Container) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo {
	return mg.Spec.PublishConnectionDetailsTo
}

// GetWriteConnectionSecretToReference of this Container.
func (mg *Container) GetWriteConnectionSecretToReference() *xpv1.SecretReference {
	return mg.Spec.WriteConnectionSecretToReference
}

// SetConditions of this Container. Conditions are delegated to the
// Status, which stores them.
func (mg *Container) SetConditions(c ...xpv1.Condition) {
	mg.Status.SetConditions(c...)
}

// SetDeletionPolicy of this Container. The policy is stored in Spec.
func (mg *Container) SetDeletionPolicy(r xpv1.DeletionPolicy) {
	mg.Spec.DeletionPolicy = r
}

// SetManagementPolicies of this Container.
func (mg *Container) SetManagementPolicies(r xpv1.ManagementPolicies) {
	mg.Spec.ManagementPolicies = r
}

// SetProviderConfigReference of this Container.
func (mg *Container) SetProviderConfigReference(r *xpv1.Reference) {
	mg.Spec.ProviderConfigReference = r
}

// SetPublishConnectionDetailsTo of this Container.
func (mg *Container) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) {
	mg.Spec.PublishConnectionDetailsTo = r
}

// SetWriteConnectionSecretToReference of this Container.
func (mg *Container) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) {
	mg.Spec.WriteConnectionSecretToReference = r
}

// GetCondition of this ContainerIAMBinding. Lookup is delegated to the
// Status, which stores the conditions.
func (mg *ContainerIAMBinding) GetCondition(ct xpv1.ConditionType) xpv1.Condition {
	return mg.Status.GetCondition(ct)
}

// GetDeletionPolicy of this ContainerIAMBinding.
func (mg *ContainerIAMBinding) GetDeletionPolicy() xpv1.DeletionPolicy {
	return mg.Spec.DeletionPolicy
}

// GetManagementPolicies of this ContainerIAMBinding.
func (mg *ContainerIAMBinding) GetManagementPolicies() xpv1.ManagementPolicies {
	return mg.Spec.ManagementPolicies
}

// GetProviderConfigReference of this ContainerIAMBinding.
+func (mg *ContainerIAMBinding) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ContainerIAMBinding. +func (mg *ContainerIAMBinding) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ContainerIAMBinding. +func (mg *ContainerIAMBinding) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ContainerIAMBinding. +func (mg *ContainerIAMBinding) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ContainerIAMBinding. +func (mg *ContainerIAMBinding) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ContainerIAMBinding. +func (mg *ContainerIAMBinding) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ContainerIAMBinding. +func (mg *ContainerIAMBinding) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ContainerIAMBinding. +func (mg *ContainerIAMBinding) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ContainerIAMBinding. +func (mg *ContainerIAMBinding) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/serverless/v1alpha1/zz_generated.managedlist.go b/apis/serverless/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 0000000..8c663a7 --- /dev/null +++ b/apis/serverless/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,23 @@ +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ContainerIAMBindingList. +func (l *ContainerIAMBindingList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ContainerList. +func (l *ContainerList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/serverless/v1alpha1/zz_generated.resolvers.go b/apis/serverless/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000..9a72b67 --- /dev/null +++ b/apis/serverless/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,53 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this Container. 
+func (mg *Container) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/serverless/v1alpha1/zz_groupversion_info.go b/apis/serverless/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..cff40fb --- /dev/null +++ b/apis/serverless/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,28 @@ +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=serverless.yandex-cloud.upjet.crossplane.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "serverless.yandex-cloud.upjet.crossplane.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/yandex/v1alpha1/zz_function_terraformed.go b/apis/yandex/v1alpha1/zz_function_terraformed.go new file mode 100755 index 0000000..74edffd --- /dev/null +++ b/apis/yandex/v1alpha1/zz_function_terraformed.go @@ -0,0 +1,125 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Function +func (mg *Function) GetTerraformResourceType() string { + return "yandex_function" +} + +// GetConnectionDetailsMapping for this Function +func (tr *Function) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Function +func (tr *Function) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Function +func (tr *Function) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Function +func (tr *Function) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return 
*tr.Status.AtProvider.ID +} + +// GetParameters of this Function +func (tr *Function) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Function +func (tr *Function) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Function +func (tr *Function) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Function +func (tr *Function) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Function using its observed tfState. 
+// returns True if there are any spec changes for the resource. +func (tr *Function) LateInitialize(attrs []byte) (bool, error) { + params := &FunctionParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Function) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/yandex/v1alpha1/zz_function_types.go b/apis/yandex/v1alpha1/zz_function_types.go new file mode 100755 index 0000000..a8730c1 --- /dev/null +++ b/apis/yandex/v1alpha1/zz_function_types.go @@ -0,0 +1,668 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AsyncInvocationInitParameters struct { + + // Maximum number of retries for async invocation + RetriesCount *float64 `json:"retriesCount,omitempty" tf:"retries_count,omitempty"` + + // Service account used for async invocation + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + + // Target for unsuccessful async invocation + YmqFailureTarget []YmqFailureTargetInitParameters `json:"ymqFailureTarget,omitempty" tf:"ymq_failure_target,omitempty"` + + // Target for successful async invocation + YmqSuccessTarget []YmqSuccessTargetInitParameters `json:"ymqSuccessTarget,omitempty" tf:"ymq_success_target,omitempty"` +} + +type AsyncInvocationObservation struct { + + // Maximum number of retries for async invocation + RetriesCount *float64 
`json:"retriesCount,omitempty" tf:"retries_count,omitempty"` + + // Service account used for async invocation + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + + // Target for unsuccessful async invocation + YmqFailureTarget []YmqFailureTargetObservation `json:"ymqFailureTarget,omitempty" tf:"ymq_failure_target,omitempty"` + + // Target for successful async invocation + YmqSuccessTarget []YmqSuccessTargetObservation `json:"ymqSuccessTarget,omitempty" tf:"ymq_success_target,omitempty"` +} + +type AsyncInvocationParameters struct { + + // Maximum number of retries for async invocation + // +kubebuilder:validation:Optional + RetriesCount *float64 `json:"retriesCount,omitempty" tf:"retries_count,omitempty"` + + // Service account used for async invocation + // +kubebuilder:validation:Optional + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + + // Target for unsuccessful async invocation + // +kubebuilder:validation:Optional + YmqFailureTarget []YmqFailureTargetParameters `json:"ymqFailureTarget,omitempty" tf:"ymq_failure_target,omitempty"` + + // Target for successful async invocation + // +kubebuilder:validation:Optional + YmqSuccessTarget []YmqSuccessTargetParameters `json:"ymqSuccessTarget,omitempty" tf:"ymq_success_target,omitempty"` +} + +type ConnectivityInitParameters struct { + + // Network the version will have access to. It's essential to specify network with subnets in all availability zones. + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` +} + +type ConnectivityObservation struct { + + // Network the version will have access to. It's essential to specify network with subnets in all availability zones. + NetworkID *string `json:"networkId,omitempty" tf:"network_id,omitempty"` +} + +type ConnectivityParameters struct { + + // Network the version will have access to. 
It's essential to specify network with subnets in all availability zones. + // +kubebuilder:validation:Optional + NetworkID *string `json:"networkId" tf:"network_id,omitempty"` +} + +type ContentInitParameters struct { + + // Filename to zip archive for the version. + ZipFilename *string `json:"zipFilename,omitempty" tf:"zip_filename,omitempty"` +} + +type ContentObservation struct { + + // Filename to zip archive for the version. + ZipFilename *string `json:"zipFilename,omitempty" tf:"zip_filename,omitempty"` +} + +type ContentParameters struct { + + // Filename to zip archive for the version. + // +kubebuilder:validation:Optional + ZipFilename *string `json:"zipFilename" tf:"zip_filename,omitempty"` +} + +type FunctionInitParameters struct { + + // Config for asynchronous invocations of Yandex Cloud Function. + AsyncInvocation []AsyncInvocationInitParameters `json:"asyncInvocation,omitempty" tf:"async_invocation,omitempty"` + + // The maximum number of requests processed by a function instance at the same time. + Concurrency *float64 `json:"concurrency,omitempty" tf:"concurrency,omitempty"` + + // Function version connectivity. If specified the version will be attached to specified network. + Connectivity []ConnectivityInitParameters `json:"connectivity,omitempty" tf:"connectivity,omitempty"` + + // Version deployment content for Yandex Cloud Function code. Can be only one package or content section. Either package or content section must be specified. 
+ Content []ContentInitParameters `json:"content,omitempty" tf:"content,omitempty"` + + // Description of the Yandex Cloud Function + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Entrypoint for Yandex Cloud Function + Entrypoint *string `json:"entrypoint,omitempty" tf:"entrypoint,omitempty"` + + // A set of key/value environment variables for Yandex Cloud Function + // +mapType=granular + Environment map[string]*string `json:"environment,omitempty" tf:"environment,omitempty"` + + // Execution timeout in seconds for Yandex Cloud Function + ExecutionTimeout *string `json:"executionTimeout,omitempty" tf:"execution_timeout,omitempty"` + + // Folder ID for the Yandex Cloud Function + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // A set of key/value label pairs to assign to the Yandex Cloud Function + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Options for logging from Yandex Cloud Function. + LogOptions []LogOptionsInitParameters `json:"logOptions,omitempty" tf:"log_options,omitempty"` + + // Memory in megabytes (aligned to 128MB) for Yandex Cloud Function + Memory *float64 `json:"memory,omitempty" tf:"memory,omitempty"` + + // Yandex Cloud Function name used to define trigger + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Version deployment package for Yandex Cloud Function code. Can be only one package or content section. 
Either package or content section must be specified. + Package []PackageInitParameters `json:"package,omitempty" tf:"package,omitempty"` + + // Runtime for Yandex Cloud Function + Runtime *string `json:"runtime,omitempty" tf:"runtime,omitempty"` + + // Secrets for Yandex Cloud Function. + Secrets []SecretsInitParameters `json:"secrets,omitempty" tf:"secrets,omitempty"` + + // Service account ID for Yandex Cloud Function + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + + // Storage mounts for Yandex Cloud Function. + StorageMounts []StorageMountsInitParameters `json:"storageMounts,omitempty" tf:"storage_mounts,omitempty"` + + // Tags for Yandex Cloud Function. Tag "$latest" isn't returned. + // +listType=set + Tags []*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Tmpfs size for Yandex Cloud Function. + TmpfsSize *float64 `json:"tmpfsSize,omitempty" tf:"tmpfs_size,omitempty"` + + // User-defined string for current function version. User must change this string any times when function changed. Function will be updated when hash is changed. + UserHash *string `json:"userHash,omitempty" tf:"user_hash,omitempty"` +} + +type FunctionObservation struct { + + // Config for asynchronous invocations of Yandex Cloud Function. + AsyncInvocation []AsyncInvocationObservation `json:"asyncInvocation,omitempty" tf:"async_invocation,omitempty"` + + // The maximum number of requests processed by a function instance at the same time. + Concurrency *float64 `json:"concurrency,omitempty" tf:"concurrency,omitempty"` + + // Function version connectivity. If specified the version will be attached to specified network. + Connectivity []ConnectivityObservation `json:"connectivity,omitempty" tf:"connectivity,omitempty"` + + // Version deployment content for Yandex Cloud Function code. Can be only one package or content section. Either package or content section must be specified. 
+ Content []ContentObservation `json:"content,omitempty" tf:"content,omitempty"` + + // Creation timestamp of the Yandex Cloud Function. + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // Description of the Yandex Cloud Function + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Entrypoint for Yandex Cloud Function + Entrypoint *string `json:"entrypoint,omitempty" tf:"entrypoint,omitempty"` + + // A set of key/value environment variables for Yandex Cloud Function + // +mapType=granular + Environment map[string]*string `json:"environment,omitempty" tf:"environment,omitempty"` + + // Execution timeout in seconds for Yandex Cloud Function + ExecutionTimeout *string `json:"executionTimeout,omitempty" tf:"execution_timeout,omitempty"` + + // Folder ID for the Yandex Cloud Function + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Secret's id. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Image size for Yandex Cloud Function. + ImageSize *float64 `json:"imageSize,omitempty" tf:"image_size,omitempty"` + + // A set of key/value label pairs to assign to the Yandex Cloud Function + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Options for logging from Yandex Cloud Function. + LogOptions []LogOptionsObservation `json:"logOptions,omitempty" tf:"log_options,omitempty"` + + // Loggroup ID size for Yandex Cloud Function. + LoggroupID *string `json:"loggroupId,omitempty" tf:"loggroup_id,omitempty"` + + // Memory in megabytes (aligned to 128MB) for Yandex Cloud Function + Memory *float64 `json:"memory,omitempty" tf:"memory,omitempty"` + + // Yandex Cloud Function name used to define trigger + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Version deployment package for Yandex Cloud Function code. Can be only one package or content section. Either package or content section must be specified. 
+ Package []PackageObservation `json:"package,omitempty" tf:"package,omitempty"` + + // Runtime for Yandex Cloud Function + Runtime *string `json:"runtime,omitempty" tf:"runtime,omitempty"` + + // Secrets for Yandex Cloud Function. + Secrets []SecretsObservation `json:"secrets,omitempty" tf:"secrets,omitempty"` + + // Service account ID for Yandex Cloud Function + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + + // Storage mounts for Yandex Cloud Function. + StorageMounts []StorageMountsObservation `json:"storageMounts,omitempty" tf:"storage_mounts,omitempty"` + + // Tags for Yandex Cloud Function. Tag "$latest" isn't returned. + // +listType=set + Tags []*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Tmpfs size for Yandex Cloud Function. + TmpfsSize *float64 `json:"tmpfsSize,omitempty" tf:"tmpfs_size,omitempty"` + + // User-defined string for current function version. User must change this string any times when function changed. Function will be updated when hash is changed. + UserHash *string `json:"userHash,omitempty" tf:"user_hash,omitempty"` + + // Version for Yandex Cloud Function. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type FunctionParameters struct { + + // Config for asynchronous invocations of Yandex Cloud Function. + // +kubebuilder:validation:Optional + AsyncInvocation []AsyncInvocationParameters `json:"asyncInvocation,omitempty" tf:"async_invocation,omitempty"` + + // The maximum number of requests processed by a function instance at the same time. + // +kubebuilder:validation:Optional + Concurrency *float64 `json:"concurrency,omitempty" tf:"concurrency,omitempty"` + + // Function version connectivity. If specified the version will be attached to specified network. 
+ // +kubebuilder:validation:Optional + Connectivity []ConnectivityParameters `json:"connectivity,omitempty" tf:"connectivity,omitempty"` + + // Version deployment content for Yandex Cloud Function code. Can be only one package or content section. Either package or content section must be specified. + // +kubebuilder:validation:Optional + Content []ContentParameters `json:"content,omitempty" tf:"content,omitempty"` + + // Description of the Yandex Cloud Function + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Entrypoint for Yandex Cloud Function + // +kubebuilder:validation:Optional + Entrypoint *string `json:"entrypoint,omitempty" tf:"entrypoint,omitempty"` + + // A set of key/value environment variables for Yandex Cloud Function + // +kubebuilder:validation:Optional + // +mapType=granular + Environment map[string]*string `json:"environment,omitempty" tf:"environment,omitempty"` + + // Execution timeout in seconds for Yandex Cloud Function + // +kubebuilder:validation:Optional + ExecutionTimeout *string `json:"executionTimeout,omitempty" tf:"execution_timeout,omitempty"` + + // Folder ID for the Yandex Cloud Function + // +crossplane:generate:reference:type=github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1.Folder + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Reference to a Folder in resourcemanager to populate folderId. + // +kubebuilder:validation:Optional + FolderIDRef *v1.Reference `json:"folderIdRef,omitempty" tf:"-"` + + // Selector for a Folder in resourcemanager to populate folderId. 
+ // +kubebuilder:validation:Optional + FolderIDSelector *v1.Selector `json:"folderIdSelector,omitempty" tf:"-"` + + // A set of key/value label pairs to assign to the Yandex Cloud Function + // +kubebuilder:validation:Optional + // +mapType=granular + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Options for logging from Yandex Cloud Function. + // +kubebuilder:validation:Optional + LogOptions []LogOptionsParameters `json:"logOptions,omitempty" tf:"log_options,omitempty"` + + // Memory in megabytes (aligned to 128MB) for Yandex Cloud Function + // +kubebuilder:validation:Optional + Memory *float64 `json:"memory,omitempty" tf:"memory,omitempty"` + + // Yandex Cloud Function name used to define trigger + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Version deployment package for Yandex Cloud Function code. Can be only one package or content section. Either package or content section must be specified. + // +kubebuilder:validation:Optional + Package []PackageParameters `json:"package,omitempty" tf:"package,omitempty"` + + // Runtime for Yandex Cloud Function + // +kubebuilder:validation:Optional + Runtime *string `json:"runtime,omitempty" tf:"runtime,omitempty"` + + // Secrets for Yandex Cloud Function. + // +kubebuilder:validation:Optional + Secrets []SecretsParameters `json:"secrets,omitempty" tf:"secrets,omitempty"` + + // Service account ID for Yandex Cloud Function + // +kubebuilder:validation:Optional + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` + + // Storage mounts for Yandex Cloud Function. + // +kubebuilder:validation:Optional + StorageMounts []StorageMountsParameters `json:"storageMounts,omitempty" tf:"storage_mounts,omitempty"` + + // Tags for Yandex Cloud Function. Tag "$latest" isn't returned. 
+ // +kubebuilder:validation:Optional + // +listType=set + Tags []*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Tmpfs size for Yandex Cloud Function. + // +kubebuilder:validation:Optional + TmpfsSize *float64 `json:"tmpfsSize,omitempty" tf:"tmpfs_size,omitempty"` + + // User-defined string for current function version. User must change this string any times when function changed. Function will be updated when hash is changed. + // +kubebuilder:validation:Optional + UserHash *string `json:"userHash,omitempty" tf:"user_hash,omitempty"` +} + +type LogOptionsInitParameters struct { + + // Is logging from function disabled + Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` + + // Folder ID for the Yandex Cloud Function + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Log entries are written to specified log group + LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` + + // Minimum log entry level + MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"` +} + +type LogOptionsObservation struct { + + // Is logging from function disabled + Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` + + // Folder ID for the Yandex Cloud Function + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Log entries are written to specified log group + LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` + + // Minimum log entry level + MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"` +} + +type LogOptionsParameters struct { + + // Is logging from function disabled + // +kubebuilder:validation:Optional + Disabled *bool `json:"disabled,omitempty" tf:"disabled,omitempty"` + + // Folder ID for the Yandex Cloud Function + // +kubebuilder:validation:Optional + FolderID *string `json:"folderId,omitempty" tf:"folder_id,omitempty"` + + // Log entries are written to specified log group + // 
+kubebuilder:validation:Optional + LogGroupID *string `json:"logGroupId,omitempty" tf:"log_group_id,omitempty"` + + // Minimum log entry level + // +kubebuilder:validation:Optional + MinLevel *string `json:"minLevel,omitempty" tf:"min_level,omitempty"` +} + +type PackageInitParameters struct { + + // Name of the bucket that stores the code for the version. + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Name of the object in the bucket that stores the code for the version. + ObjectName *string `json:"objectName,omitempty" tf:"object_name,omitempty"` + + // SHA256 hash of the version deployment package. + Sha256 *string `json:"sha256,omitempty" tf:"sha_256,omitempty"` +} + +type PackageObservation struct { + + // Name of the bucket that stores the code for the version. + BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"` + + // Name of the object in the bucket that stores the code for the version. + ObjectName *string `json:"objectName,omitempty" tf:"object_name,omitempty"` + + // SHA256 hash of the version deployment package. + Sha256 *string `json:"sha256,omitempty" tf:"sha_256,omitempty"` +} + +type PackageParameters struct { + + // Name of the bucket that stores the code for the version. + // +kubebuilder:validation:Optional + BucketName *string `json:"bucketName" tf:"bucket_name,omitempty"` + + // Name of the object in the bucket that stores the code for the version. + // +kubebuilder:validation:Optional + ObjectName *string `json:"objectName" tf:"object_name,omitempty"` + + // SHA256 hash of the version deployment package. + // +kubebuilder:validation:Optional + Sha256 *string `json:"sha256,omitempty" tf:"sha_256,omitempty"` +} + +type SecretsInitParameters struct { + + // Function's environment variable in which secret's value will be stored. + EnvironmentVariable *string `json:"environmentVariable,omitempty" tf:"environment_variable,omitempty"` + + // Secret's id. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Secret's entries key which value will be stored in environment variable. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Secret's version id. + VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"` +} + +type SecretsObservation struct { + + // Function's environment variable in which secret's value will be stored. + EnvironmentVariable *string `json:"environmentVariable,omitempty" tf:"environment_variable,omitempty"` + + // Secret's id. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Secret's entries key which value will be stored in environment variable. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Secret's version id. + VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"` +} + +type SecretsParameters struct { + + // Function's environment variable in which secret's value will be stored. + // +kubebuilder:validation:Optional + EnvironmentVariable *string `json:"environmentVariable" tf:"environment_variable,omitempty"` + + // Secret's id. + // +kubebuilder:validation:Optional + ID *string `json:"id" tf:"id,omitempty"` + + // Secret's entries key which value will be stored in environment variable. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // Secret's version id. + // +kubebuilder:validation:Optional + VersionID *string `json:"versionId" tf:"version_id,omitempty"` +} + +type StorageMountsInitParameters struct { + + // Name of the mounting bucket. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Name of the mount point. The directory where the bucket is mounted will be accessible at the /function/storage/ path. + MountPointName *string `json:"mountPointName,omitempty" tf:"mount_point_name,omitempty"` + + // Prefix within the bucket. If you leave this field empty, the entire bucket will be mounted. 
+ Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Mount the bucket in read-only mode. + ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` +} + +type StorageMountsObservation struct { + + // Name of the mounting bucket. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // Name of the mount point. The directory where the bucket is mounted will be accessible at the /function/storage/ path. + MountPointName *string `json:"mountPointName,omitempty" tf:"mount_point_name,omitempty"` + + // Prefix within the bucket. If you leave this field empty, the entire bucket will be mounted. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Mount the bucket in read-only mode. + ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` +} + +type StorageMountsParameters struct { + + // Name of the mounting bucket. + // +kubebuilder:validation:Optional + Bucket *string `json:"bucket" tf:"bucket,omitempty"` + + // Name of the mount point. The directory where the bucket is mounted will be accessible at the /function/storage/ path. + // +kubebuilder:validation:Optional + MountPointName *string `json:"mountPointName" tf:"mount_point_name,omitempty"` + + // Prefix within the bucket. If you leave this field empty, the entire bucket will be mounted. + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Mount the bucket in read-only mode. 
+ // +kubebuilder:validation:Optional + ReadOnly *bool `json:"readOnly,omitempty" tf:"read_only,omitempty"` +} + +type YmqFailureTargetInitParameters struct { + + // YMQ ARN + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Service account ID for Yandex Cloud Function + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` +} + +type YmqFailureTargetObservation struct { + + // YMQ ARN + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Service account ID for Yandex Cloud Function + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` +} + +type YmqFailureTargetParameters struct { + + // YMQ ARN + // +kubebuilder:validation:Optional + Arn *string `json:"arn" tf:"arn,omitempty"` + + // Service account ID for Yandex Cloud Function + // +kubebuilder:validation:Optional + ServiceAccountID *string `json:"serviceAccountId" tf:"service_account_id,omitempty"` +} + +type YmqSuccessTargetInitParameters struct { + + // YMQ ARN + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Service account used for writing result to queue + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` +} + +type YmqSuccessTargetObservation struct { + + // YMQ ARN + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // Service account used for writing result to queue + ServiceAccountID *string `json:"serviceAccountId,omitempty" tf:"service_account_id,omitempty"` +} + +type YmqSuccessTargetParameters struct { + + // YMQ ARN + // +kubebuilder:validation:Optional + Arn *string `json:"arn" tf:"arn,omitempty"` + + // Service account used for writing result to queue + // +kubebuilder:validation:Optional + ServiceAccountID *string `json:"serviceAccountId" tf:"service_account_id,omitempty"` +} + +// FunctionSpec defines the desired state of Function +type FunctionSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FunctionParameters 
`json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider FunctionInitParameters `json:"initProvider,omitempty"` +} + +// FunctionStatus defines the observed state of Function. +type FunctionStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FunctionObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// Function is the Schema for the Functions API. Allows management of a Yandex Cloud Function. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,yandex-cloud} +type Function struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.entrypoint) || (has(self.initProvider) && has(self.initProvider.entrypoint))",message="spec.forProvider.entrypoint is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.memory) || (has(self.initProvider) && has(self.initProvider.memory))",message="spec.forProvider.memory is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.runtime) || (has(self.initProvider) && has(self.initProvider.runtime))",message="spec.forProvider.runtime is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in 
self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.userHash) || (has(self.initProvider) && has(self.initProvider.userHash))",message="spec.forProvider.userHash is a required parameter" + Spec FunctionSpec `json:"spec"` + Status FunctionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FunctionList contains a list of Functions +type FunctionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Function `json:"items"` +} + +// Repository type metadata. +var ( + Function_Kind = "Function" + Function_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Function_Kind}.String() + Function_KindAPIVersion = Function_Kind + "." + CRDGroupVersion.String() + Function_GroupVersionKind = CRDGroupVersion.WithKind(Function_Kind) +) + +func init() { + SchemeBuilder.Register(&Function{}, &FunctionList{}) +} diff --git a/apis/yandex/v1alpha1/zz_generated.conversion_hubs.go b/apis/yandex/v1alpha1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000..69d97c3 --- /dev/null +++ b/apis/yandex/v1alpha1/zz_generated.conversion_hubs.go @@ -0,0 +1,6 @@ +// Code generated by upjet. DO NOT EDIT. + +package v1alpha1 + +// Hub marks this type as a conversion hub. +func (tr *Function) Hub() {} diff --git a/apis/yandex/v1alpha1/zz_generated.deepcopy.go b/apis/yandex/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..88bf919 --- /dev/null +++ b/apis/yandex/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,1427 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AsyncInvocationInitParameters) DeepCopyInto(out *AsyncInvocationInitParameters) { + *out = *in + if in.RetriesCount != nil { + in, out := &in.RetriesCount, &out.RetriesCount + *out = new(float64) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.YmqFailureTarget != nil { + in, out := &in.YmqFailureTarget, &out.YmqFailureTarget + *out = make([]YmqFailureTargetInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.YmqSuccessTarget != nil { + in, out := &in.YmqSuccessTarget, &out.YmqSuccessTarget + *out = make([]YmqSuccessTargetInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsyncInvocationInitParameters. +func (in *AsyncInvocationInitParameters) DeepCopy() *AsyncInvocationInitParameters { + if in == nil { + return nil + } + out := new(AsyncInvocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AsyncInvocationObservation) DeepCopyInto(out *AsyncInvocationObservation) { + *out = *in + if in.RetriesCount != nil { + in, out := &in.RetriesCount, &out.RetriesCount + *out = new(float64) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.YmqFailureTarget != nil { + in, out := &in.YmqFailureTarget, &out.YmqFailureTarget + *out = make([]YmqFailureTargetObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.YmqSuccessTarget != nil { + in, out := &in.YmqSuccessTarget, &out.YmqSuccessTarget + *out = make([]YmqSuccessTargetObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsyncInvocationObservation. +func (in *AsyncInvocationObservation) DeepCopy() *AsyncInvocationObservation { + if in == nil { + return nil + } + out := new(AsyncInvocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AsyncInvocationParameters) DeepCopyInto(out *AsyncInvocationParameters) { + *out = *in + if in.RetriesCount != nil { + in, out := &in.RetriesCount, &out.RetriesCount + *out = new(float64) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.YmqFailureTarget != nil { + in, out := &in.YmqFailureTarget, &out.YmqFailureTarget + *out = make([]YmqFailureTargetParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.YmqSuccessTarget != nil { + in, out := &in.YmqSuccessTarget, &out.YmqSuccessTarget + *out = make([]YmqSuccessTargetParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AsyncInvocationParameters. +func (in *AsyncInvocationParameters) DeepCopy() *AsyncInvocationParameters { + if in == nil { + return nil + } + out := new(AsyncInvocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectivityInitParameters) DeepCopyInto(out *ConnectivityInitParameters) { + *out = *in + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectivityInitParameters. +func (in *ConnectivityInitParameters) DeepCopy() *ConnectivityInitParameters { + if in == nil { + return nil + } + out := new(ConnectivityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectivityObservation) DeepCopyInto(out *ConnectivityObservation) { + *out = *in + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectivityObservation. +func (in *ConnectivityObservation) DeepCopy() *ConnectivityObservation { + if in == nil { + return nil + } + out := new(ConnectivityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectivityParameters) DeepCopyInto(out *ConnectivityParameters) { + *out = *in + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectivityParameters. +func (in *ConnectivityParameters) DeepCopy() *ConnectivityParameters { + if in == nil { + return nil + } + out := new(ConnectivityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentInitParameters) DeepCopyInto(out *ContentInitParameters) { + *out = *in + if in.ZipFilename != nil { + in, out := &in.ZipFilename, &out.ZipFilename + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentInitParameters. +func (in *ContentInitParameters) DeepCopy() *ContentInitParameters { + if in == nil { + return nil + } + out := new(ContentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContentObservation) DeepCopyInto(out *ContentObservation) { + *out = *in + if in.ZipFilename != nil { + in, out := &in.ZipFilename, &out.ZipFilename + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentObservation. +func (in *ContentObservation) DeepCopy() *ContentObservation { + if in == nil { + return nil + } + out := new(ContentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentParameters) DeepCopyInto(out *ContentParameters) { + *out = *in + if in.ZipFilename != nil { + in, out := &in.ZipFilename, &out.ZipFilename + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentParameters. +func (in *ContentParameters) DeepCopy() *ContentParameters { + if in == nil { + return nil + } + out := new(ContentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Function) DeepCopyInto(out *Function) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Function. +func (in *Function) DeepCopy() *Function { + if in == nil { + return nil + } + out := new(Function) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Function) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *FunctionInitParameters) DeepCopyInto(out *FunctionInitParameters) { + *out = *in + if in.AsyncInvocation != nil { + in, out := &in.AsyncInvocation, &out.AsyncInvocation + *out = make([]AsyncInvocationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Concurrency != nil { + in, out := &in.Concurrency, &out.Concurrency + *out = new(float64) + **out = **in + } + if in.Connectivity != nil { + in, out := &in.Connectivity, &out.Connectivity + *out = make([]ConnectivityInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = make([]ContentInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Entrypoint != nil { + in, out := &in.Entrypoint, &out.Entrypoint + *out = new(string) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ExecutionTimeout != nil { + in, out := &in.ExecutionTimeout, &out.ExecutionTimeout + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = 
make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.LogOptions != nil { + in, out := &in.LogOptions, &out.LogOptions + *out = make([]LogOptionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Package != nil { + in, out := &in.Package, &out.Package + *out = make([]PackageInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Runtime != nil { + in, out := &in.Runtime, &out.Runtime + *out = new(string) + **out = **in + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]SecretsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.StorageMounts != nil { + in, out := &in.StorageMounts, &out.StorageMounts + *out = make([]StorageMountsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TmpfsSize != nil { + in, out := &in.TmpfsSize, &out.TmpfsSize + *out = new(float64) + **out = **in + } + if in.UserHash != nil { + in, out := &in.UserHash, &out.UserHash + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionInitParameters. 
+func (in *FunctionInitParameters) DeepCopy() *FunctionInitParameters { + if in == nil { + return nil + } + out := new(FunctionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionList) DeepCopyInto(out *FunctionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Function, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionList. +func (in *FunctionList) DeepCopy() *FunctionList { + if in == nil { + return nil + } + out := new(FunctionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FunctionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FunctionObservation) DeepCopyInto(out *FunctionObservation) { + *out = *in + if in.AsyncInvocation != nil { + in, out := &in.AsyncInvocation, &out.AsyncInvocation + *out = make([]AsyncInvocationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Concurrency != nil { + in, out := &in.Concurrency, &out.Concurrency + *out = new(float64) + **out = **in + } + if in.Connectivity != nil { + in, out := &in.Connectivity, &out.Connectivity + *out = make([]ConnectivityObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = make([]ContentObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Entrypoint != nil { + in, out := &in.Entrypoint, &out.Entrypoint + *out = new(string) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ExecutionTimeout != nil { + in, out := &in.ExecutionTimeout, &out.ExecutionTimeout + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ImageSize != nil { + in, out := &in.ImageSize, &out.ImageSize + *out = new(float64) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, 
len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.LogOptions != nil { + in, out := &in.LogOptions, &out.LogOptions + *out = make([]LogOptionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LoggroupID != nil { + in, out := &in.LoggroupID, &out.LoggroupID + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Package != nil { + in, out := &in.Package, &out.Package + *out = make([]PackageObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Runtime != nil { + in, out := &in.Runtime, &out.Runtime + *out = new(string) + **out = **in + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]SecretsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.StorageMounts != nil { + in, out := &in.StorageMounts, &out.StorageMounts + *out = make([]StorageMountsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TmpfsSize != nil { + in, out := &in.TmpfsSize, &out.TmpfsSize + *out = new(float64) + **out = **in + } + if in.UserHash != nil { + in, out := &in.UserHash, &out.UserHash + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := 
&in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionObservation. +func (in *FunctionObservation) DeepCopy() *FunctionObservation { + if in == nil { + return nil + } + out := new(FunctionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionParameters) DeepCopyInto(out *FunctionParameters) { + *out = *in + if in.AsyncInvocation != nil { + in, out := &in.AsyncInvocation, &out.AsyncInvocation + *out = make([]AsyncInvocationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Concurrency != nil { + in, out := &in.Concurrency, &out.Concurrency + *out = new(float64) + **out = **in + } + if in.Connectivity != nil { + in, out := &in.Connectivity, &out.Connectivity + *out = make([]ConnectivityParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = make([]ContentParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Entrypoint != nil { + in, out := &in.Entrypoint, &out.Entrypoint + *out = new(string) + **out = **in + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ExecutionTimeout != nil { + in, out := &in.ExecutionTimeout, &out.ExecutionTimeout + *out = new(string) + **out = **in + } + if in.FolderID != nil { + in, out := 
&in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.FolderIDRef != nil { + in, out := &in.FolderIDRef, &out.FolderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FolderIDSelector != nil { + in, out := &in.FolderIDSelector, &out.FolderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.LogOptions != nil { + in, out := &in.LogOptions, &out.LogOptions + *out = make([]LogOptionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Package != nil { + in, out := &in.Package, &out.Package + *out = make([]PackageParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Runtime != nil { + in, out := &in.Runtime, &out.Runtime + *out = new(string) + **out = **in + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]SecretsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } + if in.StorageMounts != nil { + in, out := &in.StorageMounts, &out.StorageMounts + *out = make([]StorageMountsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := 
&(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TmpfsSize != nil { + in, out := &in.TmpfsSize, &out.TmpfsSize + *out = new(float64) + **out = **in + } + if in.UserHash != nil { + in, out := &in.UserHash, &out.UserHash + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionParameters. +func (in *FunctionParameters) DeepCopy() *FunctionParameters { + if in == nil { + return nil + } + out := new(FunctionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionSpec) DeepCopyInto(out *FunctionSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionSpec. +func (in *FunctionSpec) DeepCopy() *FunctionSpec { + if in == nil { + return nil + } + out := new(FunctionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionStatus) DeepCopyInto(out *FunctionStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionStatus. +func (in *FunctionStatus) DeepCopy() *FunctionStatus { + if in == nil { + return nil + } + out := new(FunctionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogOptionsInitParameters) DeepCopyInto(out *LogOptionsInitParameters) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.LogGroupID != nil { + in, out := &in.LogGroupID, &out.LogGroupID + *out = new(string) + **out = **in + } + if in.MinLevel != nil { + in, out := &in.MinLevel, &out.MinLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogOptionsInitParameters. +func (in *LogOptionsInitParameters) DeepCopy() *LogOptionsInitParameters { + if in == nil { + return nil + } + out := new(LogOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogOptionsObservation) DeepCopyInto(out *LogOptionsObservation) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.LogGroupID != nil { + in, out := &in.LogGroupID, &out.LogGroupID + *out = new(string) + **out = **in + } + if in.MinLevel != nil { + in, out := &in.MinLevel, &out.MinLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogOptionsObservation. +func (in *LogOptionsObservation) DeepCopy() *LogOptionsObservation { + if in == nil { + return nil + } + out := new(LogOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogOptionsParameters) DeepCopyInto(out *LogOptionsParameters) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + if in.FolderID != nil { + in, out := &in.FolderID, &out.FolderID + *out = new(string) + **out = **in + } + if in.LogGroupID != nil { + in, out := &in.LogGroupID, &out.LogGroupID + *out = new(string) + **out = **in + } + if in.MinLevel != nil { + in, out := &in.MinLevel, &out.MinLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogOptionsParameters. +func (in *LogOptionsParameters) DeepCopy() *LogOptionsParameters { + if in == nil { + return nil + } + out := new(LogOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PackageInitParameters) DeepCopyInto(out *PackageInitParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.ObjectName != nil { + in, out := &in.ObjectName, &out.ObjectName + *out = new(string) + **out = **in + } + if in.Sha256 != nil { + in, out := &in.Sha256, &out.Sha256 + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageInitParameters. +func (in *PackageInitParameters) DeepCopy() *PackageInitParameters { + if in == nil { + return nil + } + out := new(PackageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PackageObservation) DeepCopyInto(out *PackageObservation) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.ObjectName != nil { + in, out := &in.ObjectName, &out.ObjectName + *out = new(string) + **out = **in + } + if in.Sha256 != nil { + in, out := &in.Sha256, &out.Sha256 + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageObservation. +func (in *PackageObservation) DeepCopy() *PackageObservation { + if in == nil { + return nil + } + out := new(PackageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PackageParameters) DeepCopyInto(out *PackageParameters) { + *out = *in + if in.BucketName != nil { + in, out := &in.BucketName, &out.BucketName + *out = new(string) + **out = **in + } + if in.ObjectName != nil { + in, out := &in.ObjectName, &out.ObjectName + *out = new(string) + **out = **in + } + if in.Sha256 != nil { + in, out := &in.Sha256, &out.Sha256 + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageParameters. +func (in *PackageParameters) DeepCopy() *PackageParameters { + if in == nil { + return nil + } + out := new(PackageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecretsInitParameters) DeepCopyInto(out *SecretsInitParameters) { + *out = *in + if in.EnvironmentVariable != nil { + in, out := &in.EnvironmentVariable, &out.EnvironmentVariable + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretsInitParameters. +func (in *SecretsInitParameters) DeepCopy() *SecretsInitParameters { + if in == nil { + return nil + } + out := new(SecretsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretsObservation) DeepCopyInto(out *SecretsObservation) { + *out = *in + if in.EnvironmentVariable != nil { + in, out := &in.EnvironmentVariable, &out.EnvironmentVariable + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretsObservation. +func (in *SecretsObservation) DeepCopy() *SecretsObservation { + if in == nil { + return nil + } + out := new(SecretsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecretsParameters) DeepCopyInto(out *SecretsParameters) { + *out = *in + if in.EnvironmentVariable != nil { + in, out := &in.EnvironmentVariable, &out.EnvironmentVariable + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretsParameters. +func (in *SecretsParameters) DeepCopy() *SecretsParameters { + if in == nil { + return nil + } + out := new(SecretsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageMountsInitParameters) DeepCopyInto(out *StorageMountsInitParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.MountPointName != nil { + in, out := &in.MountPointName, &out.MountPointName + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageMountsInitParameters. +func (in *StorageMountsInitParameters) DeepCopy() *StorageMountsInitParameters { + if in == nil { + return nil + } + out := new(StorageMountsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageMountsObservation) DeepCopyInto(out *StorageMountsObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.MountPointName != nil { + in, out := &in.MountPointName, &out.MountPointName + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageMountsObservation. +func (in *StorageMountsObservation) DeepCopy() *StorageMountsObservation { + if in == nil { + return nil + } + out := new(StorageMountsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageMountsParameters) DeepCopyInto(out *StorageMountsParameters) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.MountPointName != nil { + in, out := &in.MountPointName, &out.MountPointName + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageMountsParameters. +func (in *StorageMountsParameters) DeepCopy() *StorageMountsParameters { + if in == nil { + return nil + } + out := new(StorageMountsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *YmqFailureTargetInitParameters) DeepCopyInto(out *YmqFailureTargetInitParameters) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YmqFailureTargetInitParameters. +func (in *YmqFailureTargetInitParameters) DeepCopy() *YmqFailureTargetInitParameters { + if in == nil { + return nil + } + out := new(YmqFailureTargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *YmqFailureTargetObservation) DeepCopyInto(out *YmqFailureTargetObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YmqFailureTargetObservation. +func (in *YmqFailureTargetObservation) DeepCopy() *YmqFailureTargetObservation { + if in == nil { + return nil + } + out := new(YmqFailureTargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *YmqFailureTargetParameters) DeepCopyInto(out *YmqFailureTargetParameters) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YmqFailureTargetParameters. 
+func (in *YmqFailureTargetParameters) DeepCopy() *YmqFailureTargetParameters { + if in == nil { + return nil + } + out := new(YmqFailureTargetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *YmqSuccessTargetInitParameters) DeepCopyInto(out *YmqSuccessTargetInitParameters) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YmqSuccessTargetInitParameters. +func (in *YmqSuccessTargetInitParameters) DeepCopy() *YmqSuccessTargetInitParameters { + if in == nil { + return nil + } + out := new(YmqSuccessTargetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *YmqSuccessTargetObservation) DeepCopyInto(out *YmqSuccessTargetObservation) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YmqSuccessTargetObservation. +func (in *YmqSuccessTargetObservation) DeepCopy() *YmqSuccessTargetObservation { + if in == nil { + return nil + } + out := new(YmqSuccessTargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *YmqSuccessTargetParameters) DeepCopyInto(out *YmqSuccessTargetParameters) { + *out = *in + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.ServiceAccountID != nil { + in, out := &in.ServiceAccountID, &out.ServiceAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YmqSuccessTargetParameters. +func (in *YmqSuccessTargetParameters) DeepCopy() *YmqSuccessTargetParameters { + if in == nil { + return nil + } + out := new(YmqSuccessTargetParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/yandex/v1alpha1/zz_generated.managed.go b/apis/yandex/v1alpha1/zz_generated.managed.go new file mode 100644 index 0000000..5acf575 --- /dev/null +++ b/apis/yandex/v1alpha1/zz_generated.managed.go @@ -0,0 +1,65 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Function. +func (mg *Function) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Function. +func (mg *Function) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Function. +func (mg *Function) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Function. +func (mg *Function) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Function. +func (mg *Function) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Function. 
+func (mg *Function) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Function. +func (mg *Function) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Function. +func (mg *Function) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Function. +func (mg *Function) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Function. +func (mg *Function) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Function. +func (mg *Function) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Function. +func (mg *Function) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/yandex/v1alpha1/zz_generated.managedlist.go b/apis/yandex/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 0000000..cd7f836 --- /dev/null +++ b/apis/yandex/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,14 @@ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this FunctionList. +func (l *FunctionList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/yandex/v1alpha1/zz_generated.resolvers.go b/apis/yandex/v1alpha1/zz_generated.resolvers.go new file mode 100644 index 0000000..e790d73 --- /dev/null +++ b/apis/yandex/v1alpha1/zz_generated.resolvers.go @@ -0,0 +1,53 @@ +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this Function. +func (mg *Function) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.FolderIDRef, + Selector: mg.Spec.ForProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FolderID") + } + mg.Spec.ForProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FolderIDRef = rsp.ResolvedReference + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FolderID), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.FolderIDRef, + Selector: mg.Spec.InitProvider.FolderIDSelector, + To: reference.To{ + List: &v1alpha1.FolderList{}, + Managed: &v1alpha1.Folder{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FolderID") + } + mg.Spec.InitProvider.FolderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FolderIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/yandex/v1alpha1/zz_groupversion_info.go b/apis/yandex/v1alpha1/zz_groupversion_info.go new file mode 100755 index 0000000..7cbc2b6 --- /dev/null +++ b/apis/yandex/v1alpha1/zz_groupversion_info.go @@ -0,0 +1,28 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +// +kubebuilder:object:generate=true +// +groupName=yandex.yandex-cloud.upjet.crossplane.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "yandex.yandex-cloud.upjet.crossplane.io" + CRDVersion = "v1alpha1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/zz_register.go b/apis/zz_register.go index 96f3ce7..3db7569 100755 --- a/apis/zz_register.go +++ b/apis/zz_register.go @@ -7,12 +7,20 @@ import ( "k8s.io/apimachinery/pkg/runtime" v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/alb/v1alpha1" + v1alpha1api "github.com/tagesjump/provider-upjet-yc/apis/api/v1alpha1" v1alpha1audit "github.com/tagesjump/provider-upjet-yc/apis/audit/v1alpha1" + v1alpha1backup "github.com/tagesjump/provider-upjet-yc/apis/backup/v1alpha1" + v1alpha1billing "github.com/tagesjump/provider-upjet-yc/apis/billing/v1alpha1" + v1alpha1cdn "github.com/tagesjump/provider-upjet-yc/apis/cdn/v1alpha1" + v1alpha1cm "github.com/tagesjump/provider-upjet-yc/apis/cm/v1alpha1" v1alpha1compute "github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1" v1alpha1container "github.com/tagesjump/provider-upjet-yc/apis/container/v1alpha1" + v1alpha1dataproc "github.com/tagesjump/provider-upjet-yc/apis/dataproc/v1alpha1" v1alpha1datatransfer "github.com/tagesjump/provider-upjet-yc/apis/datatransfer/v1alpha1" v1alpha1dns "github.com/tagesjump/provider-upjet-yc/apis/dns/v1alpha1" + v1alpha1function "github.com/tagesjump/provider-upjet-yc/apis/function/v1alpha1" 
v1alpha1iam "github.com/tagesjump/provider-upjet-yc/apis/iam/v1alpha1" + v1alpha1iot "github.com/tagesjump/provider-upjet-yc/apis/iot/v1alpha1" v1alpha1kms "github.com/tagesjump/provider-upjet-yc/apis/kms/v1alpha1" v1alpha1kubernetes "github.com/tagesjump/provider-upjet-yc/apis/kubernetes/v1alpha1" v1alpha1lb "github.com/tagesjump/provider-upjet-yc/apis/lb/v1alpha1" @@ -24,12 +32,14 @@ import ( v1alpha1monitoring "github.com/tagesjump/provider-upjet-yc/apis/monitoring/v1alpha1" v1alpha1organizationmanager "github.com/tagesjump/provider-upjet-yc/apis/organizationmanager/v1alpha1" v1alpha1resourcemanager "github.com/tagesjump/provider-upjet-yc/apis/resourcemanager/v1alpha1" + v1alpha1serverless "github.com/tagesjump/provider-upjet-yc/apis/serverless/v1alpha1" v1alpha1smartcaptcha "github.com/tagesjump/provider-upjet-yc/apis/smartcaptcha/v1alpha1" v1alpha1storage "github.com/tagesjump/provider-upjet-yc/apis/storage/v1alpha1" v1alpha1sws "github.com/tagesjump/provider-upjet-yc/apis/sws/v1alpha1" v1alpha1apis "github.com/tagesjump/provider-upjet-yc/apis/v1alpha1" v1beta1 "github.com/tagesjump/provider-upjet-yc/apis/v1beta1" v1alpha1vpc "github.com/tagesjump/provider-upjet-yc/apis/vpc/v1alpha1" + v1alpha1yandex "github.com/tagesjump/provider-upjet-yc/apis/yandex/v1alpha1" v1alpha1ydb "github.com/tagesjump/provider-upjet-yc/apis/ydb/v1alpha1" ) @@ -37,12 +47,20 @@ func init() { // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back AddToSchemes = append(AddToSchemes, v1alpha1.SchemeBuilder.AddToScheme, + v1alpha1api.SchemeBuilder.AddToScheme, v1alpha1audit.SchemeBuilder.AddToScheme, + v1alpha1backup.SchemeBuilder.AddToScheme, + v1alpha1billing.SchemeBuilder.AddToScheme, + v1alpha1cdn.SchemeBuilder.AddToScheme, + v1alpha1cm.SchemeBuilder.AddToScheme, v1alpha1compute.SchemeBuilder.AddToScheme, v1alpha1container.SchemeBuilder.AddToScheme, + v1alpha1dataproc.SchemeBuilder.AddToScheme, 
v1alpha1datatransfer.SchemeBuilder.AddToScheme, v1alpha1dns.SchemeBuilder.AddToScheme, + v1alpha1function.SchemeBuilder.AddToScheme, v1alpha1iam.SchemeBuilder.AddToScheme, + v1alpha1iot.SchemeBuilder.AddToScheme, v1alpha1kms.SchemeBuilder.AddToScheme, v1alpha1kubernetes.SchemeBuilder.AddToScheme, v1alpha1lb.SchemeBuilder.AddToScheme, @@ -54,12 +72,14 @@ func init() { v1alpha1monitoring.SchemeBuilder.AddToScheme, v1alpha1organizationmanager.SchemeBuilder.AddToScheme, v1alpha1resourcemanager.SchemeBuilder.AddToScheme, + v1alpha1serverless.SchemeBuilder.AddToScheme, v1alpha1smartcaptcha.SchemeBuilder.AddToScheme, v1alpha1storage.SchemeBuilder.AddToScheme, v1alpha1sws.SchemeBuilder.AddToScheme, v1alpha1apis.SchemeBuilder.AddToScheme, v1beta1.SchemeBuilder.AddToScheme, v1alpha1vpc.SchemeBuilder.AddToScheme, + v1alpha1yandex.SchemeBuilder.AddToScheme, v1alpha1ydb.SchemeBuilder.AddToScheme, ) } diff --git a/config/cdn/config.go b/config/cdn/config.go new file mode 100644 index 0000000..9e3f52d --- /dev/null +++ b/config/cdn/config.go @@ -0,0 +1,15 @@ +package cdn + +import ( + ujconfig "github.com/crossplane/upjet/pkg/config" +) + +const ( + // ApisPackagePath is the golang path for this package. + ApisPackagePath = "github.com/tagesjump/provider-upjet-yc/apis/cdn/v1alpha1" + // ConfigPath is the golang path for this package. + ConfigPath = "github.com/tagesjump/provider-upjet-yc/config/cdn" +) + +// Configure adds configurations for cdn group. +func Configure(p *ujconfig.Provider) {} diff --git a/config/cm/config.go b/config/cm/config.go new file mode 100644 index 0000000..a6e82d2 --- /dev/null +++ b/config/cm/config.go @@ -0,0 +1,15 @@ +package cm + +import ( + ujconfig "github.com/crossplane/upjet/pkg/config" +) + +const ( + // ApisPackagePath is the golang path for this package. + ApisPackagePath = "github.com/tagesjump/provider-upjet-yc/apis/cm/v1alpha1" + // ConfigPath is the golang path for this package. 
+ ConfigPath = "github.com/tagesjump/provider-upjet-yc/config/cm" +) + +// Configure adds configurations for cm group. +func Configure(p *ujconfig.Provider) {} diff --git a/config/compute/config.go b/config/compute/config.go index d85f815..aaa397a 100644 --- a/config/compute/config.go +++ b/config/compute/config.go @@ -37,6 +37,25 @@ func Configure(p *ujconfig.Provider) { Type: "Disk", } r.UseAsync = true + r.Sensitive.AdditionalConnectionDetailsFn = func(attr map[string]interface{}) (map[string][]byte, error) { + data := make(map[string][]byte) + if v, ok := attr["fqdn"].(string); ok && v != "" { + data["fqdn"] = []byte(v) + } + if networkInterfaces, ok := attr["network_interface"].([]interface{}); ok { + if len(networkInterfaces) > 0 { + if networkInterface, ok := networkInterfaces[0].(map[string]interface{}); ok { + if v, ok := networkInterface["ip_address"].(string); ok && v != "" { + data["internal_ip"] = []byte(v) + } + if v, ok := networkInterface["nat_ip_address"].(string); ok && v != "" { + data["external_ip"] = []byte(v) + } + } + } + } + return data, nil + } }) p.AddResourceConfigurator("yandex_compute_instance_group", func(r *ujconfig.Resource) { diff --git a/config/external_name.go b/config/external_name.go index bae4c15..c5258b2 100644 --- a/config/external_name.go +++ b/config/external_name.go @@ -7,10 +7,15 @@ import ( "github.com/tagesjump/provider-upjet-yc/config/resourcemanager" ) -// ExternalNameConfigs contains all external name configurations for this -// provider. -var ExternalNameConfigs = map[string]config.ExternalName{ +// TerraformPluginSDKExternalNameConfigs contains all external name configurations +// belonging to Terraform Plugin SDKv2 resources to be reconciled +// under the no-fork architecture for this provider. 
+var TerraformPluginSDKExternalNameConfigs = map[string]config.ExternalName{ // Import requires using a randomly generated ID from provider: nl-2e21sda + "yandex_airflow_cluster": config.IdentifierFromProvider, + "yandex_api_gateway": config.IdentifierFromProvider, + "yandex_backup_policy": config.IdentifierFromProvider, + "yandex_backup_policy_bindings": config.IdentifierFromProvider, "yandex_iam_service_account": config.NameAsIdentifier, "yandex_iam_service_account_key": config.NameAsIdentifier, "yandex_iam_service_account_api_key": config.NameAsIdentifier, @@ -49,6 +54,7 @@ var ExternalNameConfigs = map[string]config.ExternalName{ "yandex_vpc_security_group": config.IdentifierFromProvider, "yandex_vpc_security_group_rule": config.IdentifierFromProvider, "yandex_vpc_address": config.IdentifierFromProvider, + "yandex_vpc_private_endpoint": config.IdentifierFromProvider, "yandex_kubernetes_cluster": config.IdentifierFromProvider, "yandex_kubernetes_node_group": config.IdentifierFromProvider, "yandex_mdb_clickhouse_cluster": config.IdentifierFromProvider, @@ -76,7 +82,6 @@ var ExternalNameConfigs = map[string]config.ExternalName{ "yandex_compute_image": config.IdentifierFromProvider, "yandex_compute_instance": config.IdentifierFromProvider, "yandex_compute_instance_group": config.IdentifierFromProvider, - "yandex_compute_instance_migrate": config.IdentifierFromProvider, "yandex_compute_placement_group": config.IdentifierFromProvider, "yandex_compute_snapshot": config.IdentifierFromProvider, "yandex_compute_snapshot_schedule": config.IdentifierFromProvider, @@ -109,32 +114,69 @@ var ExternalNameConfigs = map[string]config.ExternalName{ "yandex_ydb_topic": config.IdentifierFromProvider, "yandex_datatransfer_endpoint": config.IdentifierFromProvider, "yandex_datatransfer_transfer": config.IdentifierFromProvider, - "yandex_lockbox_secret": config.IdentifierFromProvider, - "yandex_lockbox_secret_iam_binding": config.IdentifierFromProvider, - 
"yandex_lockbox_secret_version": config.IdentifierFromProvider, - "yandex_monitoring_dashboard": config.IdentifierFromProvider, - "yandex_loadtesting_agent": config.IdentifierFromProvider, - "yandex_mdb_opensearch_cluster": config.IdentifierFromProvider, - "yandex_audit_trails_trail": config.IdentifierFromProvider, + "yandex_dataproc_cluster": config.IdentifierFromProvider, + // "yandex_datasphere_community": config.IdentifierFromProvider, + // "yandex_datasphere_community_iam_binding": config.IdentifierFromProvider, + // "yandex_datasphere_project": config.IdentifierFromProvider, + // "yandex_datasphere_project_iam_binding": config.IdentifierFromProvider, + "yandex_lockbox_secret": config.IdentifierFromProvider, + "yandex_lockbox_secret_iam_binding": config.IdentifierFromProvider, + "yandex_lockbox_secret_version": config.IdentifierFromProvider, + "yandex_monitoring_dashboard": config.IdentifierFromProvider, + "yandex_loadtesting_agent": config.IdentifierFromProvider, + "yandex_mdb_opensearch_cluster": config.IdentifierFromProvider, + "yandex_audit_trails_trail": config.IdentifierFromProvider, // "yandex_lockbox_secret_version_hashed": config.IdentifierFromProvider, - "yandex_sws_security_profile": config.IdentifierFromProvider, - "yandex_smartcaptcha_captcha": config.IdentifierFromProvider, + "yandex_sws_security_profile": config.IdentifierFromProvider, + "yandex_smartcaptcha_captcha": config.IdentifierFromProvider, + "yandex_cm_certificate": config.IdentifierFromProvider, + "yandex_cdn_resource": config.IdentifierFromProvider, + "yandex_cdn_origin_group": config.IdentifierFromProvider, + "yandex_function": config.IdentifierFromProvider, + "yandex_function_iam_binding": config.IdentifierFromProvider, + "yandex_function_scaling_policy": config.IdentifierFromProvider, + "yandex_function_trigger": config.IdentifierFromProvider, + "yandex_iot_core_broker": config.IdentifierFromProvider, + "yandex_iot_core_device": config.IdentifierFromProvider, + 
"yandex_iot_core_registry": config.IdentifierFromProvider, + "yandex_serverless_container": config.IdentifierFromProvider, + "yandex_serverless_container_iam_binding": config.IdentifierFromProvider, +} + +// TerraformPluginFrameworkExternalNameConfigs contains all external +// name configurations belonging to Terraform Plugin Framework +// resources to be reconciled under the no-fork architecture for this +// provider. +var TerraformPluginFrameworkExternalNameConfigs = map[string]config.ExternalName{ + "yandex_mdb_mongodb_user": config.IdentifierFromProvider, + "yandex_mdb_mongodb_database": config.IdentifierFromProvider, + "yandex_compute_disk_placement_group_iam_binding": config.IdentifierFromProvider, + "yandex_compute_disk_iam_binding": config.IdentifierFromProvider, + "yandex_compute_image_iam_binding": config.IdentifierFromProvider, + "yandex_compute_snapshot_iam_binding": config.IdentifierFromProvider, + "yandex_compute_instance_iam_binding": config.IdentifierFromProvider, + "yandex_compute_filesystem_iam_binding": config.IdentifierFromProvider, + "yandex_compute_gpu_cluster_iam_binding": config.IdentifierFromProvider, + "yandex_compute_placement_group_iam_binding": config.IdentifierFromProvider, + "yandex_compute_snapshot_schedule_iam_binding": config.IdentifierFromProvider, + "yandex_billing_cloud_binding": config.IdentifierFromProvider, } // cliReconciledExternalNameConfigs contains all external name configurations // belonging to Terraform resources to be reconciled under the CLI-based // architecture for this provider. 
-var cliReconciledExternalNameConfigs = map[string]config.ExternalName{ - "yandex_mdb_mongodb_user": config.IdentifierFromProvider, - "yandex_mdb_mongodb_database": config.IdentifierFromProvider, -} +var cliReconciledExternalNameConfigs = map[string]config.ExternalName{} // ExternalNameConfigurations applies all external name configs listed in the // table ExternalNameConfigs and sets the version of those resources to v1beta1 // assuming they will be tested. func ExternalNameConfigurations() config.ResourceOption { return func(r *config.Resource) { - if e, ok := ExternalNameConfigs[r.Name]; ok { + if e, ok := TerraformPluginSDKExternalNameConfigs[r.Name]; ok { + r.ExternalName = e + } else if e, ok := TerraformPluginFrameworkExternalNameConfigs[r.Name]; ok { + r.ExternalName = e + } else if e, ok := cliReconciledExternalNameConfigs[r.Name]; ok { r.ExternalName = e } if (r.ShortGroup != "resourcemanager" && r.ShortGroup != "organizationmanager") || diff --git a/config/function/config.go b/config/function/config.go new file mode 100644 index 0000000..2b2ce7d --- /dev/null +++ b/config/function/config.go @@ -0,0 +1,15 @@ +package function + +import ( + ujconfig "github.com/crossplane/upjet/pkg/config" +) + +const ( + // ApisPackagePath is the golang path for this package. + ApisPackagePath = "github.com/tagesjump/provider-upjet-yc/apis/function/v1alpha1" + // ConfigPath is the golang path for this package. + ConfigPath = "github.com/tagesjump/provider-upjet-yc/config/function" +) + +// Configure adds configurations for function group. +func Configure(p *ujconfig.Provider) {} diff --git a/config/iot/config.go b/config/iot/config.go new file mode 100644 index 0000000..3ee4661 --- /dev/null +++ b/config/iot/config.go @@ -0,0 +1,15 @@ +package iot + +import ( + ujconfig "github.com/crossplane/upjet/pkg/config" +) + +const ( + // ApisPackagePath is the golang path for this package. 
+ ApisPackagePath = "github.com/tagesjump/provider-upjet-yc/apis/iot/v1alpha1" + // ConfigPath is the golang path for this package. + ConfigPath = "github.com/tagesjump/provider-upjet-yc/config/iot" +) + +// Configure adds configurations for iot group. +func Configure(p *ujconfig.Provider) {} diff --git a/config/mdb/config.go b/config/mdb/config.go index 155cbc8..b642829 100644 --- a/config/mdb/config.go +++ b/config/mdb/config.go @@ -293,7 +293,6 @@ func Configure(p *ujconfig.Provider) { r.References["security_group_ids"] = ujconfig.Reference{ Type: fmt.Sprintf("%s.%s", vpc.ApisPackagePath, "SecurityGroup"), } - r.UseAsync = true r.Sensitive.AdditionalConnectionDetailsFn = func(attr map[string]interface{}) (map[string][]byte, error) { return postgresqlConnDetails(attr), nil } @@ -303,13 +302,11 @@ func Configure(p *ujconfig.Provider) { r.References["cluster_id"] = ujconfig.Reference{ Type: "PostgresqlCluster", } - r.UseAsync = true }) p.AddResourceConfigurator("yandex_mdb_postgresql_user", func(r *ujconfig.Resource) { r.References["cluster_id"] = ujconfig.Reference{ Type: "PostgresqlCluster", } - r.UseAsync = true }) p.AddResourceConfigurator("yandex_mdb_mysql_cluster", func(r *ujconfig.Resource) { r.References["network_id"] = ujconfig.Reference{ diff --git a/config/provider.go b/config/provider.go index b9b3cb6..83381e0 100644 --- a/config/provider.go +++ b/config/provider.go @@ -6,6 +6,7 @@ package config import ( "github.com/yandex-cloud/terraform-provider-yandex/yandex" + yandex_framework "github.com/yandex-cloud/terraform-provider-yandex/yandex-framework/provider" // Note(turkenh): we are importing this to embed provider schema document _ "embed" @@ -18,11 +19,15 @@ import ( ujconfig "github.com/crossplane/upjet/pkg/config" "github.com/tagesjump/provider-upjet-yc/config/alb" + "github.com/tagesjump/provider-upjet-yc/config/cdn" + "github.com/tagesjump/provider-upjet-yc/config/cm" "github.com/tagesjump/provider-upjet-yc/config/compute" 
"github.com/tagesjump/provider-upjet-yc/config/container" "github.com/tagesjump/provider-upjet-yc/config/datatransfer" "github.com/tagesjump/provider-upjet-yc/config/dns" + "github.com/tagesjump/provider-upjet-yc/config/function" "github.com/tagesjump/provider-upjet-yc/config/iam" + "github.com/tagesjump/provider-upjet-yc/config/iot" "github.com/tagesjump/provider-upjet-yc/config/kms" "github.com/tagesjump/provider-upjet-yc/config/kubernetes" "github.com/tagesjump/provider-upjet-yc/config/lb" @@ -34,6 +39,7 @@ import ( "github.com/tagesjump/provider-upjet-yc/config/monitoring" "github.com/tagesjump/provider-upjet-yc/config/organizationmanager" "github.com/tagesjump/provider-upjet-yc/config/resourcemanager" + "github.com/tagesjump/provider-upjet-yc/config/serverless" "github.com/tagesjump/provider-upjet-yc/config/storage" "github.com/tagesjump/provider-upjet-yc/config/vpc" "github.com/tagesjump/provider-upjet-yc/config/ydb" @@ -73,12 +79,12 @@ func getProviderSchema(s string) (*schema.Provider, error) { // GetProvider returns provider configuration func GetProvider(generationProvider bool) (*ujconfig.Provider, error) { - var p *schema.Provider + var providerInstance *schema.Provider var err error if generationProvider { - p, err = getProviderSchema(providerSchema) + providerInstance, err = getProviderSchema(providerSchema) } else { - p = yandex.NewSDKProvider() + providerInstance = yandex.NewSDKProvider() } if err != nil { return nil, errors.Wrapf(err, "cannot get the Terraform provider schema with generation mode set to %t", generationProvider) @@ -86,14 +92,16 @@ func GetProvider(generationProvider bool) (*ujconfig.Provider, error) { pc := ujconfig.NewProvider([]byte(providerSchema), resourcePrefix, modulePath, []byte(providerMetadata), ujconfig.WithRootGroup("yandex-cloud.upjet.crossplane.io"), - ujconfig.WithIncludeList(resourceList(cliReconciledExternalNameConfigs)), - ujconfig.WithTerraformPluginSDKIncludeList(resourceList(ExternalNameConfigs)), 
ujconfig.WithFeaturesPackage("internal/features"), + ujconfig.WithTerraformProvider(providerInstance), + ujconfig.WithTerraformPluginSDKIncludeList(resourceList(TerraformPluginSDKExternalNameConfigs)), + ujconfig.WithTerraformPluginFrameworkProvider(yandex_framework.NewFrameworkProvider()), + ujconfig.WithTerraformPluginFrameworkIncludeList(resourceList(TerraformPluginFrameworkExternalNameConfigs)), + ujconfig.WithIncludeList(resourceList(cliReconciledExternalNameConfigs)), ujconfig.WithDefaultResourceOptions( ExternalNameConfigurations(), ), ujconfig.WithReferenceInjectors([]ujconfig.ReferenceInjector{reference.NewInjector(modulePath)}), - ujconfig.WithTerraformProvider(p), ) for _, configure := range []func(provider *ujconfig.Provider){ @@ -118,6 +126,11 @@ func GetProvider(generationProvider bool) (*ujconfig.Provider, error) { lockbox.Configure, monitoring.Configure, loadtesting.Configure, + serverless.Configure, + iot.Configure, + function.Configure, + cm.Configure, + cdn.Configure, } { configure(pc) } @@ -132,7 +145,7 @@ func resourceList(t map[string]ujconfig.ExternalName) []string { l := make([]string, len(t)) i := 0 for n := range t { - // Expected format is regex and we'd like to have exact matches. + // Expected format is regex, and we'd like to have exact matches. l[i] = n + "$" i++ } diff --git a/config/serverless/config.go b/config/serverless/config.go new file mode 100644 index 0000000..08928a2 --- /dev/null +++ b/config/serverless/config.go @@ -0,0 +1,15 @@ +package serverless + +import ( + ujconfig "github.com/crossplane/upjet/pkg/config" +) + +const ( + // ApisPackagePath is the golang path for this package. + ApisPackagePath = "github.com/tagesjump/provider-upjet-yc/apis/serverless/v1alpha1" + // ConfigPath is the golang path for this package. + ConfigPath = "github.com/tagesjump/provider-upjet-yc/config/serverless" +) + +// Configure adds configurations for serverless group. 
+func Configure(p *ujconfig.Provider) {} diff --git a/examples-generated/api/v1alpha1/gateway.yaml b/examples-generated/api/v1alpha1/gateway.yaml new file mode 100644 index 0000000..16c0731 --- /dev/null +++ b/examples-generated/api/v1alpha1/gateway.yaml @@ -0,0 +1,69 @@ +apiVersion: api.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Gateway +metadata: + annotations: + meta.upbound.io/example-id: api/v1alpha1/gateway + labels: + testing.upbound.io/example-name: test-api-gateway + name: test-api-gateway +spec: + forProvider: + canary: + - variables: + installation: dev + weight: 20 + connectivity: + - networkId: + customDomains: + - certificateId: + fqdn: test.example.com + description: any description + executionTimeout: "300" + labels: + empty-label: "" + label: label + logOptions: + - logGroupId: + minLevel: ERROR + name: some_name + spec: | + openapi: "3.0.0" + info: + version: 1.0.0 + title: Test API + x-yc-apigateway: + variables: + installation: + default: "prod" + enum: + - "prod" + - "dev" + paths: + /hello: + get: + summary: Say hello + operationId: hello + parameters: + - name: user + in: query + description: User name to appear in greetings + required: false + schema: + type: string + default: 'world' + responses: + '200': + description: Greeting + content: + 'text/plain': + schema: + type: "string" + x-yc-apigateway-integration: + type: dummy + http_code: 200 + http_headers: + 'Content-Type': "text/plain" + content: + 'text/plain': "Hello again, {user} from ${apigw.installation} release!\n" + variables: + installation: prod diff --git a/examples-generated/backup/v1alpha1/policy.yaml b/examples-generated/backup/v1alpha1/policy.yaml new file mode 100644 index 0000000..e039135 --- /dev/null +++ b/examples-generated/backup/v1alpha1/policy.yaml @@ -0,0 +1,20 @@ +apiVersion: backup.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Policy +metadata: + annotations: + meta.upbound.io/example-id: backup/v1alpha1/policy + labels: + 
testing.upbound.io/example-name: basic_policy + name: basic-policy +spec: + forProvider: + name: basic policy + reattempts: + - {} + retention: + - afterBackup: false + scheduling: + - enabled: false + executeByInterval: 86400 + vmSnapshotReattempts: + - {} diff --git a/examples-generated/billing/v1alpha1/cloudbinding.yaml b/examples-generated/billing/v1alpha1/cloudbinding.yaml new file mode 100644 index 0000000..aa9b330 --- /dev/null +++ b/examples-generated/billing/v1alpha1/cloudbinding.yaml @@ -0,0 +1,12 @@ +apiVersion: billing.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: CloudBinding +metadata: + annotations: + meta.upbound.io/example-id: billing/v1alpha1/cloudbinding + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + billingAccountId: foo-ba-id + cloudId: foo-cloud-id diff --git a/examples-generated/cdn/v1alpha1/origingroup.yaml b/examples-generated/cdn/v1alpha1/origingroup.yaml new file mode 100644 index 0000000..3843787 --- /dev/null +++ b/examples-generated/cdn/v1alpha1/origingroup.yaml @@ -0,0 +1,18 @@ +apiVersion: cdn.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: OriginGroup +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1alpha1/origingroup + labels: + testing.upbound.io/example-name: my_group + name: my-group +spec: + forProvider: + name: My Origin group + origin: + - source: ya.ru + - source: yandex.ru + - source: goo.gl + - backup: false + source: amazon.com + useNext: true diff --git a/examples-generated/cdn/v1alpha1/resource.yaml b/examples-generated/cdn/v1alpha1/resource.yaml new file mode 100644 index 0000000..2a10e41 --- /dev/null +++ b/examples-generated/cdn/v1alpha1/resource.yaml @@ -0,0 +1,26 @@ +apiVersion: cdn.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Resource +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1alpha1/resource + labels: + testing.upbound.io/example-name: my_resource + name: my-resource +spec: + forProvider: + active: false + cname: cdn1.yandex-example.ru 
+ options: + - edgeCacheSettings: 345600 + ignoreCookie: true + staticRequestHeaders: + is-from-cdn: "yes" + staticResponseHeaders: + is-cdn: "yes" + originGroupIdSelector: + matchLabels: + testing.upbound.io/example-name: foo_cdn_group_by_id + originProtocol: https + secondaryHostnames: + - cdn-example-1.yandex.ru + - cdn-example-2.yandex.ru diff --git a/examples-generated/cm/v1alpha1/certificate.yaml b/examples-generated/cm/v1alpha1/certificate.yaml new file mode 100644 index 0000000..0261027 --- /dev/null +++ b/examples-generated/cm/v1alpha1/certificate.yaml @@ -0,0 +1,15 @@ +apiVersion: cm.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Certificate +metadata: + annotations: + meta.upbound.io/example-id: cm/v1alpha1/certificate + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + domains: + - example.com + managed: + - challengeType: DNS_CNAME + name: example diff --git a/examples-generated/compute/v1alpha1/diskiambinding.yaml b/examples-generated/compute/v1alpha1/diskiambinding.yaml new file mode 100644 index 0000000..4eecd42 --- /dev/null +++ b/examples-generated/compute/v1alpha1/diskiambinding.yaml @@ -0,0 +1,35 @@ +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: DiskIAMBinding +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/diskiambinding + labels: + testing.upbound.io/example-name: editor + name: editor +spec: + forProvider: + diskId: ${data.yandex_compute_disk.disk1.id} + members: + - userAccount:some_user_id + role: editor + +--- + +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Disk +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/diskiambinding + labels: + testing.upbound.io/example-name: disk1 + name: disk1 +spec: + forProvider: + imageIdSelector: + matchLabels: + testing.upbound.io/example-name: example + labels: + environment: test + name: disk-name + type: network-ssd + zone: ru-central1-a diff --git 
a/examples-generated/compute/v1alpha1/diskplacementgroupiambinding.yaml b/examples-generated/compute/v1alpha1/diskplacementgroupiambinding.yaml new file mode 100644 index 0000000..fe2c29b --- /dev/null +++ b/examples-generated/compute/v1alpha1/diskplacementgroupiambinding.yaml @@ -0,0 +1,32 @@ +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: DiskPlacementGroupIAMBinding +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/diskplacementgroupiambinding + labels: + testing.upbound.io/example-name: editor + name: editor +spec: + forProvider: + diskPlacementGroupId: ${data.yandex_compute_disk_placement_group.group1.id} + members: + - userAccount:some_user_id + role: editor + +--- + +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: DiskPlacementGroup +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/diskplacementgroupiambinding + labels: + testing.upbound.io/example-name: group1 + name: group1 +spec: + forProvider: + description: my description + folderIdSelector: + matchLabels: + testing.upbound.io/example-name: example + name: test-pg diff --git a/examples-generated/compute/v1alpha1/filesystemiambinding.yaml b/examples-generated/compute/v1alpha1/filesystemiambinding.yaml new file mode 100644 index 0000000..215be1c --- /dev/null +++ b/examples-generated/compute/v1alpha1/filesystemiambinding.yaml @@ -0,0 +1,33 @@ +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: FilesystemIAMBinding +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/filesystemiambinding + labels: + testing.upbound.io/example-name: editor + name: editor +spec: + forProvider: + filesystemId: ${data.yandex_compute_filesystem.fs1.id} + members: + - userAccount:some_user_id + role: editor + +--- + +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Filesystem +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/filesystemiambinding + labels: + 
testing.upbound.io/example-name: fs1 + name: fs1 +spec: + forProvider: + labels: + environment: test + name: fs-name + size: 10 + type: network-ssd + zone: ru-central1-a diff --git a/examples-generated/compute/v1alpha1/gpuclusteriambinding.yaml b/examples-generated/compute/v1alpha1/gpuclusteriambinding.yaml new file mode 100644 index 0000000..51df5b7 --- /dev/null +++ b/examples-generated/compute/v1alpha1/gpuclusteriambinding.yaml @@ -0,0 +1,32 @@ +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: GpuClusterIAMBinding +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/gpuclusteriambinding + labels: + testing.upbound.io/example-name: editor + name: editor +spec: + forProvider: + gpuClusterId: ${data.yandex_compute_gpu_cluster.cluster1.id} + members: + - userAccount:some_user_id + role: editor + +--- + +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: GpuCluster +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/gpuclusteriambinding + labels: + testing.upbound.io/example-name: cluster1 + name: cluster1 +spec: + forProvider: + interconnectType: infiniband + labels: + environment: test + name: gpu-cluster-name + zone: ru-central1-a diff --git a/examples-generated/compute/v1alpha1/imageiambinding.yaml b/examples-generated/compute/v1alpha1/imageiambinding.yaml new file mode 100644 index 0000000..57283df --- /dev/null +++ b/examples-generated/compute/v1alpha1/imageiambinding.yaml @@ -0,0 +1,29 @@ +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: ImageIAMBinding +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/imageiambinding + labels: + testing.upbound.io/example-name: editor + name: editor +spec: + forProvider: + imageId: ${data.yandex_compute_image.image1.id} + members: + - userAccount:some_user_id + role: editor + +--- + +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Image +metadata: + annotations: + 
meta.upbound.io/example-id: compute/v1alpha1/imageiambinding + labels: + testing.upbound.io/example-name: image1 + name: image1 +spec: + forProvider: + name: my-custom-image + sourceUrl: https://storage.yandexcloud.net/lucky-images/kube-it.img diff --git a/examples-generated/compute/v1alpha1/instanceiambinding.yaml b/examples-generated/compute/v1alpha1/instanceiambinding.yaml new file mode 100644 index 0000000..21c5ce0 --- /dev/null +++ b/examples-generated/compute/v1alpha1/instanceiambinding.yaml @@ -0,0 +1,77 @@ +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: InstanceIAMBinding +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/instanceiambinding + labels: + testing.upbound.io/example-name: editor + name: editor +spec: + forProvider: + instanceId: ${data.yandex_compute_instance.instance1.id} + members: + - userAccount:some_user_id + role: editor + +--- + +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Instance +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/instanceiambinding + labels: + testing.upbound.io/example-name: instance1 + name: instance1 +spec: + forProvider: + bootDisk: + - diskIdSelector: + matchLabels: + testing.upbound.io/example-name: boot-disk + metadata: + foo: bar + ssh-keys: ubuntu:${file("~/.ssh/id_rsa.pub")} + name: test + networkInterface: + - index: 1 + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + platformId: standard-v1 + resources: + - cores: 2 + memory: 4 + zone: ru-central1-a + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Network +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/instanceiambinding + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: {} + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/instanceiambinding + labels: + 
testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + v4CidrBlocks: + - 10.5.0.0/24 + zone: ru-central1-a diff --git a/examples-generated/compute/v1alpha1/placementgroupiambinding.yaml b/examples-generated/compute/v1alpha1/placementgroupiambinding.yaml new file mode 100644 index 0000000..e94f74a --- /dev/null +++ b/examples-generated/compute/v1alpha1/placementgroupiambinding.yaml @@ -0,0 +1,32 @@ +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: PlacementGroupIAMBinding +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/placementgroupiambinding + labels: + testing.upbound.io/example-name: editor + name: editor +spec: + forProvider: + members: + - userAccount:some_user_id + placementGroupId: ${data.yandex_compute_placement_group.pg1.id} + role: editor + +--- + +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: PlacementGroup +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/placementgroupiambinding + labels: + testing.upbound.io/example-name: pg1 + name: pg1 +spec: + forProvider: + description: my description + folderIdSelector: + matchLabels: + testing.upbound.io/example-name: example + name: test-pg diff --git a/examples-generated/compute/v1alpha1/snapshotiambinding.yaml b/examples-generated/compute/v1alpha1/snapshotiambinding.yaml new file mode 100644 index 0000000..2810255 --- /dev/null +++ b/examples-generated/compute/v1alpha1/snapshotiambinding.yaml @@ -0,0 +1,33 @@ +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: SnapshotIAMBinding +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/snapshotiambinding + labels: + testing.upbound.io/example-name: editor + name: editor +spec: + forProvider: + members: + - userAccount:some_user_id + role: editor + snapshotId: ${data.yandex_compute_snapshot.snapshot1.id} + +--- + +apiVersion: 
compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Snapshot +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/snapshotiambinding + labels: + testing.upbound.io/example-name: snapshot1 + name: snapshot1 +spec: + forProvider: + labels: + my-label: my-label-value + name: test-snapshot + sourceDiskIdSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/compute/v1alpha1/snapshotscheduleiambinding.yaml b/examples-generated/compute/v1alpha1/snapshotscheduleiambinding.yaml new file mode 100644 index 0000000..2c3d6ed --- /dev/null +++ b/examples-generated/compute/v1alpha1/snapshotscheduleiambinding.yaml @@ -0,0 +1,35 @@ +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: SnapshotScheduleIAMBinding +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/snapshotscheduleiambinding + labels: + testing.upbound.io/example-name: editor + name: editor +spec: + forProvider: + members: + - userAccount:some_user_id + role: editor + snapshotScheduleId: ${data.yandex_compute_snapshot_schedule.schedule1.id} + +--- + +apiVersion: compute.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: SnapshotSchedule +metadata: + annotations: + meta.upbound.io/example-id: compute/v1alpha1/snapshotscheduleiambinding + labels: + testing.upbound.io/example-name: schedule1 + name: schedule1 +spec: + forProvider: + diskIdsRefs: + - name: example + - name: example + retentionPeriod: 12h + schedulePolicy: + - expression: 0 0 * * * + snapshotSpec: + - description: retention-snapshot diff --git a/examples-generated/dataproc/v1alpha1/cluster.yaml b/examples-generated/dataproc/v1alpha1/cluster.yaml new file mode 100644 index 0000000..c7234e3 --- /dev/null +++ b/examples-generated/dataproc/v1alpha1/cluster.yaml @@ -0,0 +1,208 @@ +apiVersion: dataproc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Cluster +metadata: + annotations: + meta.upbound.io/example-id: dataproc/v1alpha1/cluster + labels: + 
testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + bucketSelector: + matchLabels: + testing.upbound.io/example-name: foo + clusterConfig: + - hadoop: + - initializationAction: + - args: + - arg1 + - arg2 + uri: s3a://yandex_storage_bucket.foo.bucket/scripts/script.sh + properties: + yarn:yarn.resourcemanager.am.max-attempts: 5 + services: + - HDFS + - YARN + - SPARK + - TEZ + - MAPREDUCE + - HIVE + sshPublicKeys: + - ${file("~/.ssh/id_rsa.pub")} + subclusterSpec: + - hostsCount: 1 + name: main + resources: + - diskSize: 20 + diskTypeId: network-hdd + resourcePresetId: s2.small + role: MASTERNODE + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + - hostsCount: 2 + name: data + resources: + - diskSize: 20 + diskTypeId: network-hdd + resourcePresetId: s2.small + role: DATANODE + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + - hostsCount: 2 + name: compute + resources: + - diskSize: 20 + diskTypeId: network-hdd + resourcePresetId: s2.small + role: COMPUTENODE + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + - autoscalingConfig: + - decommissionTimeout: 60 + maxHostsCount: 10 + measurementDuration: 60 + preemptible: false + stabilizationDuration: 120 + warmupDuration: 60 + hostsCount: 2 + name: compute_autoscaling + resources: + - diskSize: 20 + diskTypeId: network-hdd + resourcePresetId: s2.small + role: COMPUTENODE + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + description: Dataproc Cluster created by Terraform + labels: + created_by: terraform + name: dataproc-cluster + serviceAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: dataproc + zoneId: ru-central1-b + +--- + +apiVersion: iam.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: ServiceAccount +metadata: + annotations: + meta.upbound.io/example-id: dataproc/v1alpha1/cluster + labels: + testing.upbound.io/example-name: dataproc + name: dataproc +spec: + forProvider: + 
description: service account to manage Dataproc Cluster + +--- + +apiVersion: iam.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: ServiceAccountStaticAccessKey +metadata: + annotations: + meta.upbound.io/example-id: dataproc/v1alpha1/cluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + serviceAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: dataproc + +--- + +apiVersion: resourcemanager.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: FolderIAMBinding +metadata: + annotations: + meta.upbound.io/example-id: dataproc/v1alpha1/cluster + labels: + testing.upbound.io/example-name: bucket-creator + name: bucket-creator +spec: + forProvider: + folderIdSelector: + matchLabels: + testing.upbound.io/example-name: yandex_resourcemanager_folder + members: + - serviceAccount:${yandex_iam_service_account.dataproc.id} + role: editor + +--- + +apiVersion: resourcemanager.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: FolderIAMBinding +metadata: + annotations: + meta.upbound.io/example-id: dataproc/v1alpha1/cluster + labels: + testing.upbound.io/example-name: dataproc + name: dataproc +spec: + forProvider: + folderIdSelector: + matchLabels: + testing.upbound.io/example-name: yandex_resourcemanager_folder + members: + - serviceAccount:${yandex_iam_service_account.dataproc.id} + role: mdb.dataproc.agent + +--- + +apiVersion: storage.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Bucket +metadata: + annotations: + meta.upbound.io/example-id: dataproc/v1alpha1/cluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + accessKeySelector: + matchLabels: + testing.upbound.io/example-name: foo + bucket: foo + secretKeySecretRef: + key: attribute.secret_key + name: example-iam-service-account-static-access-key + namespace: upbound-system + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Network +metadata: + annotations: + meta.upbound.io/example-id: 
dataproc/v1alpha1/cluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: {} + +--- + +apiVersion: vpc.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: dataproc/v1alpha1/cluster + labels: + testing.upbound.io/example-name: foo + name: foo +spec: + forProvider: + networkIdSelector: + matchLabels: + testing.upbound.io/example-name: foo + v4CidrBlocks: + - 10.1.0.0/24 + zone: ru-central1-b diff --git a/examples-generated/function/v1alpha1/iambinding.yaml b/examples-generated/function/v1alpha1/iambinding.yaml new file mode 100644 index 0000000..27f464d --- /dev/null +++ b/examples-generated/function/v1alpha1/iambinding.yaml @@ -0,0 +1,14 @@ +apiVersion: function.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: IAMBinding +metadata: + annotations: + meta.upbound.io/example-id: function/v1alpha1/iambinding + labels: + testing.upbound.io/example-name: function-iam + name: function-iam +spec: + forProvider: + functionId: your-function-id + members: + - system:allUsers + role: serverless.functions.invoker diff --git a/examples-generated/function/v1alpha1/scalingpolicy.yaml b/examples-generated/function/v1alpha1/scalingpolicy.yaml new file mode 100644 index 0000000..5628a07 --- /dev/null +++ b/examples-generated/function/v1alpha1/scalingpolicy.yaml @@ -0,0 +1,18 @@ +apiVersion: function.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: ScalingPolicy +metadata: + annotations: + meta.upbound.io/example-id: function/v1alpha1/scalingpolicy + labels: + testing.upbound.io/example-name: my_scaling_policy + name: my-scaling-policy +spec: + forProvider: + functionId: are1samplefunction11 + policy: + - tag: $latest + zoneInstancesLimit: 3 + zoneRequestsLimit: 100 + - tag: my_tag + zoneInstancesLimit: 4 + zoneRequestsLimit: 150 diff --git a/examples-generated/function/v1alpha1/trigger.yaml b/examples-generated/function/v1alpha1/trigger.yaml new file mode 100644 index 0000000..c424f25 --- 
/dev/null +++ b/examples-generated/function/v1alpha1/trigger.yaml @@ -0,0 +1,16 @@ +apiVersion: function.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Trigger +metadata: + annotations: + meta.upbound.io/example-id: function/v1alpha1/trigger + labels: + testing.upbound.io/example-name: my_trigger + name: my-trigger +spec: + forProvider: + description: any description + function: + - id: tf-test + name: some_name + timer: + - cronExpression: '* * * * ? *' diff --git a/examples-generated/iot/v1alpha1/corebroker.yaml b/examples-generated/iot/v1alpha1/corebroker.yaml new file mode 100644 index 0000000..aea69ce --- /dev/null +++ b/examples-generated/iot/v1alpha1/corebroker.yaml @@ -0,0 +1,20 @@ +apiVersion: iot.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: CoreBroker +metadata: + annotations: + meta.upbound.io/example-id: iot/v1alpha1/corebroker + labels: + testing.upbound.io/example-name: my_broker + name: my-broker +spec: + forProvider: + certificates: + - public part of certificate1 + - public part of certificate2 + description: any description + labels: + my-label: my-label-value + logOptions: + - logGroupId: log-group-id + minLevel: ERROR + name: some_name diff --git a/examples-generated/iot/v1alpha1/coredevice.yaml b/examples-generated/iot/v1alpha1/coredevice.yaml new file mode 100644 index 0000000..70443a6 --- /dev/null +++ b/examples-generated/iot/v1alpha1/coredevice.yaml @@ -0,0 +1,23 @@ +apiVersion: iot.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: CoreDevice +metadata: + annotations: + meta.upbound.io/example-id: iot/v1alpha1/coredevice + labels: + testing.upbound.io/example-name: my_device + name: my-device +spec: + forProvider: + aliases: + some_alias1/subtopic: $devices/{id}/events/somesubtopic + some_alias2/subtopic: $devices/{id}/events/aaa/bbb + certificates: + - public part of certificate1 + - public part of certificate2 + description: any description + name: some_name + passwordsSecretRef: + - key: example-key + name: example-secret + 
namespace: upbound-system + registryId: are1sampleregistryid11 diff --git a/examples-generated/iot/v1alpha1/coreregistry.yaml b/examples-generated/iot/v1alpha1/coreregistry.yaml new file mode 100644 index 0000000..ee54ff9 --- /dev/null +++ b/examples-generated/iot/v1alpha1/coreregistry.yaml @@ -0,0 +1,24 @@ +apiVersion: iot.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: CoreRegistry +metadata: + annotations: + meta.upbound.io/example-id: iot/v1alpha1/coreregistry + labels: + testing.upbound.io/example-name: my_registry + name: my-registry +spec: + forProvider: + certificates: + - public part of certificate1 + - public part of certificate2 + description: any description + labels: + my-label: my-label-value + logOptions: + - logGroupId: log-group-id + minLevel: ERROR + name: some_name + passwordsSecretRef: + - key: example-key + name: example-secret + namespace: upbound-system diff --git a/examples-generated/mdb/v1alpha1/mongodbdatabase.yaml b/examples-generated/mdb/v1alpha1/mongodbdatabase.yaml index a37cfcd..9655e63 100644 --- a/examples-generated/mdb/v1alpha1/mongodbdatabase.yaml +++ b/examples-generated/mdb/v1alpha1/mongodbdatabase.yaml @@ -11,6 +11,7 @@ spec: clusterIdSelector: matchLabels: testing.upbound.io/example-name: foo + name: testdb --- diff --git a/examples-generated/mdb/v1alpha1/mongodbuser.yaml b/examples-generated/mdb/v1alpha1/mongodbuser.yaml index ad939a4..20077a8 100644 --- a/examples-generated/mdb/v1alpha1/mongodbuser.yaml +++ b/examples-generated/mdb/v1alpha1/mongodbuser.yaml @@ -11,6 +11,7 @@ spec: clusterIdSelector: matchLabels: testing.upbound.io/example-name: foo + name: alice passwordSecretRef: key: example-key name: example-secret diff --git a/examples-generated/serverless/v1alpha1/container.yaml b/examples-generated/serverless/v1alpha1/container.yaml new file mode 100644 index 0000000..4994ba7 --- /dev/null +++ b/examples-generated/serverless/v1alpha1/container.yaml @@ -0,0 +1,27 @@ +apiVersion: 
serverless.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Container +metadata: + annotations: + meta.upbound.io/example-id: serverless/v1alpha1/container + labels: + testing.upbound.io/example-name: test-container + name: test-container +spec: + forProvider: + coreFraction: 100 + cores: 1 + description: any description + executionTimeout: 15s + image: + - url: cr.yandex/yc/test-image:v1 + logOptions: + - logGroupId: e2392vo6d1bne2aeq9fr + minLevel: ERROR + memory: 256 + name: some_name + secrets: + - environmentVariable: ENV_VARIABLE + id: ${yandex_lockbox_secret.secret.id} + key: secret-key + versionId: ${yandex_lockbox_secret_version.secret_version.id} + serviceAccountId: are1service2account3id diff --git a/examples-generated/serverless/v1alpha1/containeriambinding.yaml b/examples-generated/serverless/v1alpha1/containeriambinding.yaml new file mode 100644 index 0000000..46658b8 --- /dev/null +++ b/examples-generated/serverless/v1alpha1/containeriambinding.yaml @@ -0,0 +1,14 @@ +apiVersion: serverless.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: ContainerIAMBinding +metadata: + annotations: + meta.upbound.io/example-id: serverless/v1alpha1/containeriambinding + labels: + testing.upbound.io/example-name: container-iam + name: container-iam +spec: + forProvider: + containerId: your-container-id + members: + - system:allUsers + role: serverless.containers.invoker diff --git a/examples-generated/yandex/v1alpha1/function.yaml b/examples-generated/yandex/v1alpha1/function.yaml new file mode 100644 index 0000000..03d6aac --- /dev/null +++ b/examples-generated/yandex/v1alpha1/function.yaml @@ -0,0 +1,39 @@ +apiVersion: yandex.yandex-cloud.upjet.crossplane.io/v1alpha1 +kind: Function +metadata: + annotations: + meta.upbound.io/example-id: yandex/v1alpha1/function + labels: + testing.upbound.io/example-name: test-function + name: test-function +spec: + forProvider: + asyncInvocation: + - retriesCount: "3" + services_account_id: ajeihp9qsfg2l6f838kk + ymqFailureTarget: 
+ - arn: yrn:yc:ymq:ru-central1:b1glraqqa1i7tmh9hsfp:fail + serviceAccountId: ajeqr0pjpbrkovcqb76m + ymqSuccessTarget: + - arn: yrn:yc:ymq:ru-central1:b1glraqqa1i7tmh9hsfp:success + serviceAccountId: ajeqr0pjpbrkovcqb76m + content: + - zipFilename: function.zip + description: any description + entrypoint: main + executionTimeout: "10" + logOptions: + - logGroupId: e2392vo6d1bne2aeq9fr + minLevel: ERROR + memory: "128" + name: some_name + runtime: python37 + secrets: + - environmentVariable: ENV_VARIABLE + id: ${yandex_lockbox_secret.secret.id} + key: secret-key + versionId: ${yandex_lockbox_secret_version.secret_version.id} + serviceAccountId: are1service2account3id + tags: + - my_tag + userHash: any_user_defined_string diff --git a/go.mod b/go.mod index 6086d4f..45a0fd2 100644 --- a/go.mod +++ b/go.mod @@ -74,6 +74,8 @@ require ( github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/terraform-exec v0.20.0 // indirect github.com/hashicorp/terraform-plugin-framework v1.7.0 // indirect + github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1 // indirect + github.com/hashicorp/terraform-plugin-framework-validators v0.10.0 // indirect github.com/hashicorp/terraform-plugin-go v0.22.1 // indirect github.com/hashicorp/terraform-plugin-log v0.9.0 // indirect github.com/hashicorp/terraform-plugin-testing v1.5.1 // indirect diff --git a/go.sum b/go.sum index 1f548a1..58dd5e6 100644 --- a/go.sum +++ b/go.sum @@ -213,6 +213,10 @@ github.com/hashicorp/terraform-json v0.21.0 h1:9NQxbLNqPbEMze+S6+YluEdXgJmhQykRy github.com/hashicorp/terraform-json v0.21.0/go.mod h1:qdeBs11ovMzo5puhrRibdD6d2Dq6TyE/28JiU4tIQxk= github.com/hashicorp/terraform-plugin-framework v1.7.0 h1:wOULbVmfONnJo9iq7/q+iBOBJul5vRovaYJIu2cY/Pw= github.com/hashicorp/terraform-plugin-framework v1.7.0/go.mod h1:jY9Id+3KbZ17OMpulgnWLSfwxNVYSoYBQFTgsx044CI= +github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1 h1:gm5b1kHgFFhaKFhm4h2TgvMUlNzFAtUqlcOWnWPm+9E= 
+github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1/go.mod h1:MsjL1sQ9L7wGwzJ5RjcI6FzEMdyoBnw+XK8ZnOvQOLY= +github.com/hashicorp/terraform-plugin-framework-validators v0.10.0 h1:4L0tmy/8esP6OcvocVymw52lY0HyQ5OxB7VNl7k4bS0= +github.com/hashicorp/terraform-plugin-framework-validators v0.10.0/go.mod h1:qdQJCdimB9JeX2YwOpItEu+IrfoJjWQ5PhLpAOMDQAE= github.com/hashicorp/terraform-plugin-go v0.22.1 h1:iTS7WHNVrn7uhe3cojtvWWn83cm2Z6ryIUDTRO0EV7w= github.com/hashicorp/terraform-plugin-go v0.22.1/go.mod h1:qrjnqRghvQ6KnDbB12XeZ4FluclYwptntoWCr9QaXTI= github.com/hashicorp/terraform-plugin-log v0.9.0 h1:i7hOA+vdAItN1/7UrfBqBwvYPQ9TFvymaRGZED3FCV0= diff --git a/internal/controller/api/gateway/zz_controller.go b/internal/controller/api/gateway/zz_controller.go new file mode 100755 index 0000000..ded5ec9 --- /dev/null +++ b/internal/controller/api/gateway/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package gateway + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/api/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles Gateway managed resources. 
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Gateway_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Gateway_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Gateway_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_api_gateway"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Gateway_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, 
managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.Gateway + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.Gateway{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Gateway") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.GatewayList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.GatewayList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Gateway_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.Gateway{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/backup/policy/zz_controller.go b/internal/controller/backup/policy/zz_controller.go new file mode 100755 index 0000000..3f1fb25 --- /dev/null +++ b/internal/controller/backup/policy/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package policy + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/backup/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles Policy managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Policy_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Policy_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Policy_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_backup_policy"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + 
tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Policy_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.Policy + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.Policy{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Policy") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.PolicyList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.PolicyList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Policy_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). 
+ Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.Policy{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/billing/cloudbinding/zz_controller.go b/internal/controller/billing/cloudbinding/zz_controller.go new file mode 100755 index 0000000..90d2d20 --- /dev/null +++ b/internal/controller/billing/cloudbinding/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. + +package cloudbinding + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/billing/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles CloudBinding managed resources. 
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.CloudBinding_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.CloudBinding_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.CloudBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginFrameworkAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_billing_cloud_binding"], + tjcontroller.WithTerraformPluginFrameworkAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginFrameworkAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginFrameworkAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginFrameworkAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.CloudBinding_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginFrameworkAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + 
managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.CloudBinding + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.CloudBinding{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.CloudBinding") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.CloudBindingList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.CloudBindingList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.CloudBinding_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.CloudBinding{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/cdn/origingroup/zz_controller.go b/internal/controller/cdn/origingroup/zz_controller.go new file mode 100755 index 0000000..4cdaf9a --- /dev/null +++ b/internal/controller/cdn/origingroup/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package origingroup + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/cdn/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles OriginGroup managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.OriginGroup_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.OriginGroup_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.OriginGroup_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_cdn_origin_group"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + 
tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.OriginGroup_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.OriginGroup + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.OriginGroup{}). 
+ Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.OriginGroup") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.OriginGroupList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.OriginGroupList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.OriginGroup_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.OriginGroup{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/cdn/resource/zz_controller.go b/internal/controller/cdn/resource/zz_controller.go new file mode 100755 index 0000000..82e7831 --- /dev/null +++ b/internal/controller/cdn/resource/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package resource + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/cdn/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles Resource managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Resource_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Resource_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Resource_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_cdn_resource"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + 
tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Resource_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.Resource + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.Resource{}). 
+ Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Resource") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.ResourceList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.ResourceList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Resource_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.Resource{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/cm/certificate/zz_controller.go b/internal/controller/cm/certificate/zz_controller.go new file mode 100755 index 0000000..7cafba6 --- /dev/null +++ b/internal/controller/cm/certificate/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package certificate + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/cm/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles Certificate managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Certificate_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Certificate_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Certificate_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_cm_certificate"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + 
tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Certificate_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.Certificate + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.Certificate{}). 
+ Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Certificate") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.CertificateList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.CertificateList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Certificate_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.Certificate{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/compute/diskiambinding/zz_controller.go b/internal/controller/compute/diskiambinding/zz_controller.go new file mode 100755 index 0000000..a69d8d9 --- /dev/null +++ b/internal/controller/compute/diskiambinding/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package diskiambinding + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles DiskIAMBinding managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.DiskIAMBinding_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.DiskIAMBinding_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.DiskIAMBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginFrameworkAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_compute_disk_iam_binding"], + tjcontroller.WithTerraformPluginFrameworkAsyncLogger(o.Logger), + 
tjcontroller.WithTerraformPluginFrameworkAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginFrameworkAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginFrameworkAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.DiskIAMBinding_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginFrameworkAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.DiskIAMBinding + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.DiskIAMBinding{}). 
+ Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.DiskIAMBinding") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.DiskIAMBindingList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.DiskIAMBindingList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.DiskIAMBinding_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.DiskIAMBinding{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/compute/diskplacementgroupiambinding/zz_controller.go b/internal/controller/compute/diskplacementgroupiambinding/zz_controller.go new file mode 100755 index 0000000..a4953ca --- /dev/null +++ b/internal/controller/compute/diskplacementgroupiambinding/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package diskplacementgroupiambinding + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles DiskPlacementGroupIAMBinding managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.DiskPlacementGroupIAMBinding_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.DiskPlacementGroupIAMBinding_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.DiskPlacementGroupIAMBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginFrameworkAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, 
o.Provider.Resources["yandex_compute_disk_placement_group_iam_binding"], + tjcontroller.WithTerraformPluginFrameworkAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginFrameworkAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginFrameworkAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginFrameworkAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.DiskPlacementGroupIAMBinding_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginFrameworkAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.DiskPlacementGroupIAMBinding + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.DiskPlacementGroupIAMBinding{}). 
+ Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.DiskPlacementGroupIAMBinding") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.DiskPlacementGroupIAMBindingList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.DiskPlacementGroupIAMBindingList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.DiskPlacementGroupIAMBinding_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.DiskPlacementGroupIAMBinding{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/compute/filesystemiambinding/zz_controller.go b/internal/controller/compute/filesystemiambinding/zz_controller.go new file mode 100755 index 0000000..ec1fa8b --- /dev/null +++ b/internal/controller/compute/filesystemiambinding/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package filesystemiambinding + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles FilesystemIAMBinding managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.FilesystemIAMBinding_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.FilesystemIAMBinding_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.FilesystemIAMBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginFrameworkAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_compute_filesystem_iam_binding"], + 
tjcontroller.WithTerraformPluginFrameworkAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginFrameworkAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginFrameworkAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginFrameworkAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.FilesystemIAMBinding_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginFrameworkAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.FilesystemIAMBinding + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.FilesystemIAMBinding{}). 
+ Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.FilesystemIAMBinding") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.FilesystemIAMBindingList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.FilesystemIAMBindingList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.FilesystemIAMBinding_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.FilesystemIAMBinding{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/compute/gpuclusteriambinding/zz_controller.go b/internal/controller/compute/gpuclusteriambinding/zz_controller.go new file mode 100755 index 0000000..071e877 --- /dev/null +++ b/internal/controller/compute/gpuclusteriambinding/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package gpuclusteriambinding + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles GpuClusterIAMBinding managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.GpuClusterIAMBinding_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.GpuClusterIAMBinding_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.GpuClusterIAMBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginFrameworkAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_compute_gpu_cluster_iam_binding"], + 
tjcontroller.WithTerraformPluginFrameworkAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginFrameworkAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginFrameworkAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginFrameworkAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.GpuClusterIAMBinding_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginFrameworkAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.GpuClusterIAMBinding + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.GpuClusterIAMBinding{}). 
+ Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.GpuClusterIAMBinding") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.GpuClusterIAMBindingList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.GpuClusterIAMBindingList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.GpuClusterIAMBinding_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.GpuClusterIAMBinding{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/compute/imageiambinding/zz_controller.go b/internal/controller/compute/imageiambinding/zz_controller.go new file mode 100755 index 0000000..653eb30 --- /dev/null +++ b/internal/controller/compute/imageiambinding/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package imageiambinding + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles ImageIAMBinding managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.ImageIAMBinding_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.ImageIAMBinding_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ImageIAMBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginFrameworkAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_compute_image_iam_binding"], + tjcontroller.WithTerraformPluginFrameworkAsyncLogger(o.Logger), + 
tjcontroller.WithTerraformPluginFrameworkAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginFrameworkAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginFrameworkAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.ImageIAMBinding_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginFrameworkAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.ImageIAMBinding + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.ImageIAMBinding{}). 
+ Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.ImageIAMBinding") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.ImageIAMBindingList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.ImageIAMBindingList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.ImageIAMBinding_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.ImageIAMBinding{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/compute/instanceiambinding/zz_controller.go b/internal/controller/compute/instanceiambinding/zz_controller.go new file mode 100755 index 0000000..5ff7743 --- /dev/null +++ b/internal/controller/compute/instanceiambinding/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package instanceiambinding + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles InstanceIAMBinding managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.InstanceIAMBinding_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.InstanceIAMBinding_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.InstanceIAMBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginFrameworkAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_compute_instance_iam_binding"], + 
tjcontroller.WithTerraformPluginFrameworkAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginFrameworkAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginFrameworkAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginFrameworkAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.InstanceIAMBinding_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginFrameworkAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.InstanceIAMBinding + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.InstanceIAMBinding{}). 
+ Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.InstanceIAMBinding") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.InstanceIAMBindingList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.InstanceIAMBindingList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.InstanceIAMBinding_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.InstanceIAMBinding{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/compute/placementgroupiambinding/zz_controller.go b/internal/controller/compute/placementgroupiambinding/zz_controller.go new file mode 100755 index 0000000..fbff05e --- /dev/null +++ b/internal/controller/compute/placementgroupiambinding/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package placementgroupiambinding + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles PlacementGroupIAMBinding managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.PlacementGroupIAMBinding_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.PlacementGroupIAMBinding_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.PlacementGroupIAMBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginFrameworkAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_compute_placement_group_iam_binding"], + 
tjcontroller.WithTerraformPluginFrameworkAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginFrameworkAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginFrameworkAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginFrameworkAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.PlacementGroupIAMBinding_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginFrameworkAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.PlacementGroupIAMBinding + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.PlacementGroupIAMBinding{}). 
+ Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.PlacementGroupIAMBinding") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.PlacementGroupIAMBindingList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.PlacementGroupIAMBindingList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.PlacementGroupIAMBinding_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.PlacementGroupIAMBinding{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/compute/snapshotiambinding/zz_controller.go b/internal/controller/compute/snapshotiambinding/zz_controller.go new file mode 100755 index 0000000..83efcde --- /dev/null +++ b/internal/controller/compute/snapshotiambinding/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package snapshotiambinding + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles SnapshotIAMBinding managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.SnapshotIAMBinding_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.SnapshotIAMBinding_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.SnapshotIAMBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginFrameworkAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_compute_snapshot_iam_binding"], + 
tjcontroller.WithTerraformPluginFrameworkAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginFrameworkAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginFrameworkAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginFrameworkAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.SnapshotIAMBinding_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginFrameworkAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.SnapshotIAMBinding + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.SnapshotIAMBinding{}). 
+ Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.SnapshotIAMBinding") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.SnapshotIAMBindingList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.SnapshotIAMBindingList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.SnapshotIAMBinding_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.SnapshotIAMBinding{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/compute/snapshotscheduleiambinding/zz_controller.go b/internal/controller/compute/snapshotscheduleiambinding/zz_controller.go new file mode 100755 index 0000000..bf366ad --- /dev/null +++ b/internal/controller/compute/snapshotscheduleiambinding/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package snapshotscheduleiambinding + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/compute/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles SnapshotScheduleIAMBinding managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.SnapshotScheduleIAMBinding_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.SnapshotScheduleIAMBinding_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.SnapshotScheduleIAMBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginFrameworkAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_compute_snapshot_schedule_iam_binding"], + 
tjcontroller.WithTerraformPluginFrameworkAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginFrameworkAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginFrameworkAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginFrameworkAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.SnapshotScheduleIAMBinding_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginFrameworkAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.SnapshotScheduleIAMBinding + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.SnapshotScheduleIAMBinding{}). 
+ Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.SnapshotScheduleIAMBinding") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.SnapshotScheduleIAMBindingList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.SnapshotScheduleIAMBindingList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.SnapshotScheduleIAMBinding_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.SnapshotScheduleIAMBinding{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/dataproc/cluster/zz_controller.go b/internal/controller/dataproc/cluster/zz_controller.go new file mode 100755 index 0000000..7ca14a4 --- /dev/null +++ b/internal/controller/dataproc/cluster/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package cluster + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/dataproc/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles Cluster managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Cluster_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Cluster_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Cluster_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_dataproc_cluster"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + 
tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Cluster_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.Cluster + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.Cluster{}). 
+ Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Cluster") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.ClusterList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.ClusterList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Cluster_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.Cluster{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/function/iambinding/zz_controller.go b/internal/controller/function/iambinding/zz_controller.go new file mode 100755 index 0000000..283fc8c --- /dev/null +++ b/internal/controller/function/iambinding/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package iambinding + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/function/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles IAMBinding managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.IAMBinding_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.IAMBinding_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.IAMBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_function_iam_binding"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + 
tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.IAMBinding_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.IAMBinding + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.IAMBinding{}). 
+ Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.IAMBinding") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.IAMBindingList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.IAMBindingList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.IAMBinding_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.IAMBinding{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/function/scalingpolicy/zz_controller.go b/internal/controller/function/scalingpolicy/zz_controller.go new file mode 100755 index 0000000..1b7f00f --- /dev/null +++ b/internal/controller/function/scalingpolicy/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package scalingpolicy + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/function/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles ScalingPolicy managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.ScalingPolicy_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.ScalingPolicy_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ScalingPolicy_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_function_scaling_policy"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + 
tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.ScalingPolicy_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.ScalingPolicy + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.ScalingPolicy{}). 
+ Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.ScalingPolicy") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.ScalingPolicyList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.ScalingPolicyList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.ScalingPolicy_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.ScalingPolicy{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/function/trigger/zz_controller.go b/internal/controller/function/trigger/zz_controller.go new file mode 100755 index 0000000..032ed30 --- /dev/null +++ b/internal/controller/function/trigger/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package trigger + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/function/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles Trigger managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Trigger_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Trigger_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Trigger_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_function_trigger"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + 
tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Trigger_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.Trigger + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.Trigger{}). 
+ Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Trigger") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.TriggerList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.TriggerList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Trigger_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.Trigger{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/iot/corebroker/zz_controller.go b/internal/controller/iot/corebroker/zz_controller.go new file mode 100755 index 0000000..6c1c8ee --- /dev/null +++ b/internal/controller/iot/corebroker/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package corebroker + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/iot/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles CoreBroker managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.CoreBroker_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.CoreBroker_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.CoreBroker_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_iot_core_broker"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + 
tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.CoreBroker_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.CoreBroker + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.CoreBroker{}). 
+ Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.CoreBroker") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.CoreBrokerList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.CoreBrokerList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.CoreBroker_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.CoreBroker{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/iot/coredevice/zz_controller.go b/internal/controller/iot/coredevice/zz_controller.go new file mode 100755 index 0000000..b0831c7 --- /dev/null +++ b/internal/controller/iot/coredevice/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package coredevice + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/iot/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles CoreDevice managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.CoreDevice_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.CoreDevice_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.CoreDevice_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_iot_core_device"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + 
tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.CoreDevice_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.CoreDevice + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.CoreDevice{}). 
+ Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.CoreDevice") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.CoreDeviceList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.CoreDeviceList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.CoreDevice_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.CoreDevice{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/iot/coreregistry/zz_controller.go b/internal/controller/iot/coreregistry/zz_controller.go new file mode 100755 index 0000000..c232a6a --- /dev/null +++ b/internal/controller/iot/coreregistry/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package coreregistry + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/iot/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles CoreRegistry managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.CoreRegistry_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.CoreRegistry_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.CoreRegistry_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_iot_core_registry"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + 
tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.CoreRegistry_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.CoreRegistry + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.CoreRegistry{}). 
+ Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.CoreRegistry") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.CoreRegistryList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.CoreRegistryList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.CoreRegistry_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.CoreRegistry{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/mdb/mongodbdatabase/zz_controller.go b/internal/controller/mdb/mongodbdatabase/zz_controller.go index 83e7f71..c352a0b 100755 --- a/internal/controller/mdb/mongodbdatabase/zz_controller.go +++ b/internal/controller/mdb/mongodbdatabase/zz_controller.go @@ -13,7 +13,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" "github.com/crossplane/upjet/pkg/controller/handler" - "github.com/crossplane/upjet/pkg/terraform" + "github.com/crossplane/upjet/pkg/metrics" "github.com/pkg/errors" ctrl "sigs.k8s.io/controller-runtime" @@ -25,20 +25,23 @@ import ( func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { name := managed.ControllerName(v1alpha1.MongodbDatabase_GroupVersionKind.String()) var initializers managed.InitializerChain - initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient())) cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} if 
o.SecretStoreConfigGVK != nil { cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) } eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.MongodbDatabase_GroupVersionKind))) - ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.MongodbDatabase_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.MongodbDatabase_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) opts := []managed.ReconcilerOption{ - managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["yandex_mdb_mongodb_database"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), - tjcontroller.WithCallbackProvider(ac), - )), + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginFrameworkAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_mdb_mongodb_database"], + tjcontroller.WithTerraformPluginFrameworkAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginFrameworkAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginFrameworkAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginFrameworkAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.MongodbDatabase_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginFrameworkAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), managed.WithLogger(o.Logger.WithValues("controller", name)), managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), - managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + 
managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), managed.WithTimeout(3 * time.Minute), managed.WithInitializers(initializers), managed.WithConnectionPublishers(cps...), diff --git a/internal/controller/mdb/mongodbuser/zz_controller.go b/internal/controller/mdb/mongodbuser/zz_controller.go index a827345..2ae0f2b 100755 --- a/internal/controller/mdb/mongodbuser/zz_controller.go +++ b/internal/controller/mdb/mongodbuser/zz_controller.go @@ -13,7 +13,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" "github.com/crossplane/upjet/pkg/controller/handler" - "github.com/crossplane/upjet/pkg/terraform" + "github.com/crossplane/upjet/pkg/metrics" "github.com/pkg/errors" ctrl "sigs.k8s.io/controller-runtime" @@ -25,20 +25,23 @@ import ( func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { name := managed.ControllerName(v1alpha1.MongodbUser_GroupVersionKind.String()) var initializers managed.InitializerChain - initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient())) cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} if o.SecretStoreConfigGVK != nil { cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) } eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.MongodbUser_GroupVersionKind))) - ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.MongodbUser_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler)) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.MongodbUser_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) opts := []managed.ReconcilerOption{ - 
managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["yandex_mdb_mongodb_user"], tjcontroller.WithLogger(o.Logger), tjcontroller.WithConnectorEventHandler(eventHandler), - tjcontroller.WithCallbackProvider(ac), - )), + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginFrameworkAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_mdb_mongodb_user"], + tjcontroller.WithTerraformPluginFrameworkAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginFrameworkAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginFrameworkAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginFrameworkAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.MongodbUser_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginFrameworkAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), managed.WithLogger(o.Logger.WithValues("controller", name)), managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), - managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), managed.WithTimeout(3 * time.Minute), managed.WithInitializers(initializers), managed.WithConnectionPublishers(cps...), diff --git a/internal/controller/serverless/container/zz_controller.go b/internal/controller/serverless/container/zz_controller.go new file mode 100755 index 0000000..775323e --- /dev/null +++ b/internal/controller/serverless/container/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package container + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/serverless/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles Container managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Container_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Container_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Container_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_serverless_container"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + 
tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Container_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.Container + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.Container{}). 
+ Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Container") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.ContainerList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.ContainerList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Container_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.Container{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/serverless/containeriambinding/zz_controller.go b/internal/controller/serverless/containeriambinding/zz_controller.go new file mode 100755 index 0000000..f2f6411 --- /dev/null +++ b/internal/controller/serverless/containeriambinding/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package containeriambinding + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/serverless/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles ContainerIAMBinding managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.ContainerIAMBinding_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.ContainerIAMBinding_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.ContainerIAMBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_serverless_container_iam_binding"], + 
tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.ContainerIAMBinding_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.ContainerIAMBinding + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.ContainerIAMBinding{}). 
+ Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.ContainerIAMBinding") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.ContainerIAMBindingList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.ContainerIAMBindingList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.ContainerIAMBinding_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.ContainerIAMBinding{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/yandex/function/zz_controller.go b/internal/controller/yandex/function/zz_controller.go new file mode 100755 index 0000000..74fb2cb --- /dev/null +++ b/internal/controller/yandex/function/zz_controller.go @@ -0,0 +1,87 @@ +// Code generated by upjet. DO NOT EDIT. 
+ +package function + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1alpha1 "github.com/tagesjump/provider-upjet-yc/apis/yandex/v1alpha1" + features "github.com/tagesjump/provider-upjet-yc/internal/features" +) + +// Setup adds a controller that reconciles Function managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1alpha1.Function_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1alpha1.Function_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1alpha1.Function_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["yandex_function"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + 
tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1alpha1.Function_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1alpha1.Function + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1alpha1.Function{}). 
+ Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1alpha1.Function") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1alpha1.FunctionList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1alpha1.FunctionList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1alpha1.Function_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1alpha1.Function{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/zz_setup.go b/internal/controller/zz_setup.go index e70117d..37b282b 100755 --- a/internal/controller/zz_setup.go +++ b/internal/controller/zz_setup.go @@ -10,28 +10,47 @@ import ( loadbalancer "github.com/tagesjump/provider-upjet-yc/internal/controller/alb/loadbalancer" targetgroup "github.com/tagesjump/provider-upjet-yc/internal/controller/alb/targetgroup" virtualhost "github.com/tagesjump/provider-upjet-yc/internal/controller/alb/virtualhost" + gateway "github.com/tagesjump/provider-upjet-yc/internal/controller/api/gateway" trailstrail "github.com/tagesjump/provider-upjet-yc/internal/controller/audit/trailstrail" + policy "github.com/tagesjump/provider-upjet-yc/internal/controller/backup/policy" + cloudbinding "github.com/tagesjump/provider-upjet-yc/internal/controller/billing/cloudbinding" + origingroup "github.com/tagesjump/provider-upjet-yc/internal/controller/cdn/origingroup" + resource "github.com/tagesjump/provider-upjet-yc/internal/controller/cdn/resource" + certificate 
"github.com/tagesjump/provider-upjet-yc/internal/controller/cm/certificate" disk "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/disk" + diskiambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/diskiambinding" diskplacementgroup "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/diskplacementgroup" + diskplacementgroupiambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/diskplacementgroupiambinding" filesystem "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/filesystem" + filesystemiambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/filesystemiambinding" gpucluster "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/gpucluster" + gpuclusteriambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/gpuclusteriambinding" image "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/image" + imageiambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/imageiambinding" instance "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/instance" instancegroup "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/instancegroup" + instanceiambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/instanceiambinding" placementgroup "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/placementgroup" + placementgroupiambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/placementgroupiambinding" snapshot "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/snapshot" + snapshotiambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/snapshotiambinding" snapshotschedule "github.com/tagesjump/provider-upjet-yc/internal/controller/compute/snapshotschedule" + snapshotscheduleiambinding 
"github.com/tagesjump/provider-upjet-yc/internal/controller/compute/snapshotscheduleiambinding" registry "github.com/tagesjump/provider-upjet-yc/internal/controller/container/registry" registryiambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/container/registryiambinding" registryippermission "github.com/tagesjump/provider-upjet-yc/internal/controller/container/registryippermission" repository "github.com/tagesjump/provider-upjet-yc/internal/controller/container/repository" repositoryiambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/container/repositoryiambinding" repositorylifecyclepolicy "github.com/tagesjump/provider-upjet-yc/internal/controller/container/repositorylifecyclepolicy" + cluster "github.com/tagesjump/provider-upjet-yc/internal/controller/dataproc/cluster" endpoint "github.com/tagesjump/provider-upjet-yc/internal/controller/datatransfer/endpoint" transfer "github.com/tagesjump/provider-upjet-yc/internal/controller/datatransfer/transfer" recordset "github.com/tagesjump/provider-upjet-yc/internal/controller/dns/recordset" zone "github.com/tagesjump/provider-upjet-yc/internal/controller/dns/zone" zoneiambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/dns/zoneiambinding" + iambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/function/iambinding" + scalingpolicy "github.com/tagesjump/provider-upjet-yc/internal/controller/function/scalingpolicy" + trigger "github.com/tagesjump/provider-upjet-yc/internal/controller/function/trigger" serviceaccount "github.com/tagesjump/provider-upjet-yc/internal/controller/iam/serviceaccount" serviceaccountapikey "github.com/tagesjump/provider-upjet-yc/internal/controller/iam/serviceaccountapikey" serviceaccountiambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/iam/serviceaccountiambinding" @@ -39,6 +58,9 @@ import ( serviceaccountiampolicy 
"github.com/tagesjump/provider-upjet-yc/internal/controller/iam/serviceaccountiampolicy" serviceaccountkey "github.com/tagesjump/provider-upjet-yc/internal/controller/iam/serviceaccountkey" serviceaccountstaticaccesskey "github.com/tagesjump/provider-upjet-yc/internal/controller/iam/serviceaccountstaticaccesskey" + corebroker "github.com/tagesjump/provider-upjet-yc/internal/controller/iot/corebroker" + coredevice "github.com/tagesjump/provider-upjet-yc/internal/controller/iot/coredevice" + coreregistry "github.com/tagesjump/provider-upjet-yc/internal/controller/iot/coreregistry" asymmetricencryptionkey "github.com/tagesjump/provider-upjet-yc/internal/controller/kms/asymmetricencryptionkey" asymmetricencryptionkeyiambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/kms/asymmetricencryptionkeyiambinding" asymmetricsignaturekey "github.com/tagesjump/provider-upjet-yc/internal/controller/kms/asymmetricsignaturekey" @@ -46,7 +68,7 @@ import ( secretciphertext "github.com/tagesjump/provider-upjet-yc/internal/controller/kms/secretciphertext" symmetrickey "github.com/tagesjump/provider-upjet-yc/internal/controller/kms/symmetrickey" symmetrickeyiambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/kms/symmetrickeyiambinding" - cluster "github.com/tagesjump/provider-upjet-yc/internal/controller/kubernetes/cluster" + clusterkubernetes "github.com/tagesjump/provider-upjet-yc/internal/controller/kubernetes/cluster" nodegroup "github.com/tagesjump/provider-upjet-yc/internal/controller/kubernetes/nodegroup" networkloadbalancer "github.com/tagesjump/provider-upjet-yc/internal/controller/lb/networkloadbalancer" targetgrouplb "github.com/tagesjump/provider-upjet-yc/internal/controller/lb/targetgroup" @@ -93,18 +115,21 @@ import ( folderiambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/resourcemanager/folderiambinding" folderiammember "github.com/tagesjump/provider-upjet-yc/internal/controller/resourcemanager/folderiammember" 
folderiampolicy "github.com/tagesjump/provider-upjet-yc/internal/controller/resourcemanager/folderiampolicy" + container "github.com/tagesjump/provider-upjet-yc/internal/controller/serverless/container" + containeriambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/serverless/containeriambinding" captcha "github.com/tagesjump/provider-upjet-yc/internal/controller/smartcaptcha/captcha" bucket "github.com/tagesjump/provider-upjet-yc/internal/controller/storage/bucket" object "github.com/tagesjump/provider-upjet-yc/internal/controller/storage/object" securityprofile "github.com/tagesjump/provider-upjet-yc/internal/controller/sws/securityprofile" address "github.com/tagesjump/provider-upjet-yc/internal/controller/vpc/address" defaultsecuritygroup "github.com/tagesjump/provider-upjet-yc/internal/controller/vpc/defaultsecuritygroup" - gateway "github.com/tagesjump/provider-upjet-yc/internal/controller/vpc/gateway" + gatewayvpc "github.com/tagesjump/provider-upjet-yc/internal/controller/vpc/gateway" network "github.com/tagesjump/provider-upjet-yc/internal/controller/vpc/network" routetable "github.com/tagesjump/provider-upjet-yc/internal/controller/vpc/routetable" securitygroup "github.com/tagesjump/provider-upjet-yc/internal/controller/vpc/securitygroup" securitygrouprule "github.com/tagesjump/provider-upjet-yc/internal/controller/vpc/securitygrouprule" subnet "github.com/tagesjump/provider-upjet-yc/internal/controller/vpc/subnet" + function "github.com/tagesjump/provider-upjet-yc/internal/controller/yandex/function" databasededicated "github.com/tagesjump/provider-upjet-yc/internal/controller/ydb/databasededicated" databaseiambinding "github.com/tagesjump/provider-upjet-yc/internal/controller/ydb/databaseiambinding" databaseserverless "github.com/tagesjump/provider-upjet-yc/internal/controller/ydb/databaseserverless" @@ -123,28 +148,47 @@ func Setup(mgr ctrl.Manager, o controller.Options) error { loadbalancer.Setup, targetgroup.Setup, 
virtualhost.Setup, + gateway.Setup, trailstrail.Setup, + policy.Setup, + cloudbinding.Setup, + origingroup.Setup, + resource.Setup, + certificate.Setup, disk.Setup, + diskiambinding.Setup, diskplacementgroup.Setup, + diskplacementgroupiambinding.Setup, filesystem.Setup, + filesystemiambinding.Setup, gpucluster.Setup, + gpuclusteriambinding.Setup, image.Setup, + imageiambinding.Setup, instance.Setup, instancegroup.Setup, + instanceiambinding.Setup, placementgroup.Setup, + placementgroupiambinding.Setup, snapshot.Setup, + snapshotiambinding.Setup, snapshotschedule.Setup, + snapshotscheduleiambinding.Setup, registry.Setup, registryiambinding.Setup, registryippermission.Setup, repository.Setup, repositoryiambinding.Setup, repositorylifecyclepolicy.Setup, + cluster.Setup, endpoint.Setup, transfer.Setup, recordset.Setup, zone.Setup, zoneiambinding.Setup, + iambinding.Setup, + scalingpolicy.Setup, + trigger.Setup, serviceaccount.Setup, serviceaccountapikey.Setup, serviceaccountiambinding.Setup, @@ -152,6 +196,9 @@ func Setup(mgr ctrl.Manager, o controller.Options) error { serviceaccountiampolicy.Setup, serviceaccountkey.Setup, serviceaccountstaticaccesskey.Setup, + corebroker.Setup, + coredevice.Setup, + coreregistry.Setup, asymmetricencryptionkey.Setup, asymmetricencryptionkeyiambinding.Setup, asymmetricsignaturekey.Setup, @@ -159,7 +206,7 @@ func Setup(mgr ctrl.Manager, o controller.Options) error { secretciphertext.Setup, symmetrickey.Setup, symmetrickeyiambinding.Setup, - cluster.Setup, + clusterkubernetes.Setup, nodegroup.Setup, networkloadbalancer.Setup, targetgrouplb.Setup, @@ -206,18 +253,21 @@ func Setup(mgr ctrl.Manager, o controller.Options) error { folderiambinding.Setup, folderiammember.Setup, folderiampolicy.Setup, + container.Setup, + containeriambinding.Setup, captcha.Setup, bucket.Setup, object.Setup, securityprofile.Setup, address.Setup, defaultsecuritygroup.Setup, - gateway.Setup, + gatewayvpc.Setup, network.Setup, routetable.Setup, securitygroup.Setup, 
securitygrouprule.Setup, subnet.Setup, + function.Setup, databasededicated.Setup, databaseiambinding.Setup, databaseserverless.Setup, diff --git a/package/crds/api.yandex-cloud.upjet.crossplane.io_gateways.yaml b/package/crds/api.yandex-cloud.upjet.crossplane.io_gateways.yaml new file mode 100644 index 0000000..16f7ac1 --- /dev/null +++ b/package/crds/api.yandex-cloud.upjet.crossplane.io_gateways.yaml @@ -0,0 +1,783 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: gateways.api.yandex-cloud.upjet.crossplane.io +spec: + group: api.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Gateway + listKind: GatewayList + plural: gateways + singular: gateway + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Gateway is the Schema for the Gateways API. Allows management + of a Yandex Cloud API Gateway. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: GatewaySpec defines the desired state of Gateway + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + canary: + description: Canary release settings of gateway. + items: + properties: + variables: + additionalProperties: + type: string + description: A set of values for variables in gateway specification. + type: object + x-kubernetes-map-type: granular + weight: + description: Percentage of requests, which will be processed + by canary release. + type: number + type: object + type: array + connectivity: + description: Gateway connectivity. If specified the gateway will + be attached to specified network. + items: + properties: + networkId: + description: Network the gateway will have access to. It's + essential to specify network with subnets in all availability + zones. + type: string + type: object + type: array + customDomains: + description: Set of custom domains to be attached to Yandex API + Gateway. + items: + properties: + certificateId: + type: string + domainId: + type: string + fqdn: + type: string + type: object + type: array + description: + description: Description of the Yandex Cloud API Gateway. 
+ type: string + executionTimeout: + description: Execution timeout in seconds for the Yandex Cloud + API Gateway. + type: string + folderId: + description: Folder ID for the Yandex Cloud API Gateway. If it + is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Yandex + Cloud API Gateway. + type: object + x-kubernetes-map-type: granular + logOptions: + description: Options for logging from Yandex Cloud API Gateway. + items: + properties: + disabled: + description: Is logging from API Gateway disabled + type: boolean + folderId: + description: Folder ID for the Yandex Cloud API Gateway. + If it is not provided, the default provider folder is + used. + type: string + logGroupId: + description: Log entries are written to specified log group + type: string + minLevel: + description: Minimum log entry level + type: string + type: object + type: array + name: + description: Yandex Cloud API Gateway name used to define API + Gateway. + type: string + spec: + description: OpenAPI specification for Yandex API Gateway. + type: string + variables: + additionalProperties: + type: string + description: A set of values for variables in gateway specification. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + canary: + description: Canary release settings of gateway. + items: + properties: + variables: + additionalProperties: + type: string + description: A set of values for variables in gateway specification. + type: object + x-kubernetes-map-type: granular + weight: + description: Percentage of requests, which will be processed + by canary release. + type: number + type: object + type: array + connectivity: + description: Gateway connectivity. If specified the gateway will + be attached to specified network. + items: + properties: + networkId: + description: Network the gateway will have access to. It's + essential to specify network with subnets in all availability + zones. + type: string + type: object + type: array + customDomains: + description: Set of custom domains to be attached to Yandex API + Gateway. + items: + properties: + certificateId: + type: string + domainId: + type: string + fqdn: + type: string + type: object + type: array + description: + description: Description of the Yandex Cloud API Gateway. + type: string + executionTimeout: + description: Execution timeout in seconds for the Yandex Cloud + API Gateway. + type: string + folderId: + description: Folder ID for the Yandex Cloud API Gateway. If it + is not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Yandex + Cloud API Gateway. + type: object + x-kubernetes-map-type: granular + logOptions: + description: Options for logging from Yandex Cloud API Gateway. + items: + properties: + disabled: + description: Is logging from API Gateway disabled + type: boolean + folderId: + description: Folder ID for the Yandex Cloud API Gateway. + If it is not provided, the default provider folder is + used. + type: string + logGroupId: + description: Log entries are written to specified log group + type: string + minLevel: + description: Minimum log entry level + type: string + type: object + type: array + name: + description: Yandex Cloud API Gateway name used to define API + Gateway. + type: string + spec: + description: OpenAPI specification for Yandex API Gateway. + type: string + variables: + additionalProperties: + type: string + description: A set of values for variables in gateway specification. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.spec is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.spec) + || (has(self.initProvider) && has(self.initProvider.spec))' + status: + description: GatewayStatus defines the observed state of Gateway. + properties: + atProvider: + properties: + canary: + description: Canary release settings of gateway. + items: + properties: + variables: + additionalProperties: + type: string + description: A set of values for variables in gateway specification. + type: object + x-kubernetes-map-type: granular + weight: + description: Percentage of requests, which will be processed + by canary release. 
+ type: number + type: object + type: array + connectivity: + description: Gateway connectivity. If specified the gateway will + be attached to specified network. + items: + properties: + networkId: + description: Network the gateway will have access to. It's + essential to specify network with subnets in all availability + zones. + type: string + type: object + type: array + createdAt: + description: Creation timestamp of the Yandex Cloud API Gateway. + type: string + customDomains: + description: Set of custom domains to be attached to Yandex API + Gateway. + items: + properties: + certificateId: + type: string + domainId: + type: string + fqdn: + type: string + type: object + type: array + description: + description: Description of the Yandex Cloud API Gateway. + type: string + domain: + description: Default domain for the Yandex API Gateway. Generated + at creation time. + type: string + executionTimeout: + description: Execution timeout in seconds for the Yandex Cloud + API Gateway. + type: string + folderId: + description: Folder ID for the Yandex Cloud API Gateway. If it + is not provided, the default provider folder is used. + type: string + id: + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Yandex + Cloud API Gateway. + type: object + x-kubernetes-map-type: granular + logGroupId: + description: Log entries are written to specified log group + type: string + logOptions: + description: Options for logging from Yandex Cloud API Gateway. + items: + properties: + disabled: + description: Is logging from API Gateway disabled + type: boolean + folderId: + description: Folder ID for the Yandex Cloud API Gateway. + If it is not provided, the default provider folder is + used. 
+ type: string + logGroupId: + description: Log entries are written to specified log group + type: string + minLevel: + description: Minimum log entry level + type: string + type: object + type: array + name: + description: Yandex Cloud API Gateway name used to define API + Gateway. + type: string + spec: + description: OpenAPI specification for Yandex API Gateway. + type: string + status: + description: Status of the Yandex API Gateway. + type: string + userDomains: + description: (DEPRECATED, use custom_domains instead) Set of user + domains attached to Yandex API Gateway. + items: + type: string + type: array + x-kubernetes-list-type: set + variables: + additionalProperties: + type: string + description: A set of values for variables in gateway specification. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. 
At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/backup.yandex-cloud.upjet.crossplane.io_policies.yaml b/package/crds/backup.yandex-cloud.upjet.crossplane.io_policies.yaml new file mode 100644 index 0000000..a723554 --- /dev/null +++ b/package/crds/backup.yandex-cloud.upjet.crossplane.io_policies.yaml @@ -0,0 +1,1116 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: policies.backup.yandex-cloud.upjet.crossplane.io +spec: + group: backup.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Policy + listKind: PolicyList + plural: policies + singular: policy + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Policy is the Schema for the Policys API. Allows management of + Yandex.Cloud Backup Policy. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PolicySpec defines the desired state of Policy + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + archiveName: + description: '[Plan ID]-[Unique ID]a) — The name of generated + archives.' + type: string + cbt: + description: |- + — Configuration of Changed Block Tracking. + Available values are: "USE_IF_ENABLED", "ENABLED_AND_USE", "DO_NOT_USE". + type: string + compression: + description: |- + — Archive compression level. Affects CPU. + Available values: "NORMAL", "HIGH", "MAX", "OFF". 
+ type: string + fastBackupEnabled: + description: — Enable flag + type: boolean + folderId: + description: — days + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + format: + description: |- + — Format of the backup. It's strongly recommend to leave this option empty or "AUTO". + Available values: "AUTO", "VERSION_11", "VERSION_12". + type: string + multiVolumeSnapshottingEnabled: + description: — If true, snapshots of multiple volumes will be + taken simultaneously. + type: boolean + name: + description: — Name of the policy + type: string + performanceWindowEnabled: + description: — Time windows for performance limitations of backup. + type: boolean + preserveFileSecuritySettings: + description: — Preserves file security settings. It's better to + set this option to true. + type: boolean + quiesceSnapshottingEnabled: + description: — If true, a quiesced snapshot of the virtual machine + will be taken. + type: boolean + reattempts: + description: |- + — Amount of reattempts that should be performed while trying to make backup at the host. + This attribute consists of the following parameters: + items: + properties: + enabled: + description: — Enable flag + type: boolean + interval: + description: — Retry interval. See interval_type for available + values + type: string + maxAttempts: + description: — Maximum number of attempts before throwing + an error + type: number + type: object + type: array + retention: + description: |- + — Retention policy for backups. Allows to setup backups lifecycle. + This attribute consists of the following parameters: + items: + properties: + afterBackup: + description: — Defines whether retention rule applies after + creating backup or before. 
+ type: boolean + rules: + description: — seconds + items: + properties: + maxAge: + description: (Conflicts with max_count) — Deletes + backups that older than max_age. Exactly one of + max_count or max_age should be set. + type: string + maxCount: + description: (Conflicts with max_age) — Deletes backups + if it's count exceeds max_count. Exactly one of + max_count or max_age should be set. + type: number + repeatPeriod: + description: — days + items: + type: string + type: array + type: object + type: array + type: object + type: array + scheduling: + description: — Schedule settings for creating backups on the host. + items: + properties: + enabled: + description: — Enable flag + type: boolean + executeByInterval: + description: |- + — Perform backup by interval, since last backup of the host. Maximum value is: 9999 days. + See interval_type for available values. Exactly on of options should be set: execute_by_interval or execute_by_time. + type: number + executeByTime: + description: '— Perform backup periodically at specific + time. Exactly on of options should be set: execute_by_interval + or execute_by_time.' + items: + properties: + includeLastDayOfMonth: + description: |- + — If true, schedule will be applied on the last day of month. + See day_type for available values. + type: boolean + monthdays: + description: — List of days when schedule applies. + Used in "MONTHLY" type. + items: + type: number + type: array + months: + description: — seconds + items: + type: number + type: array + repeatAt: + description: hours format), when the schedule applies. + items: + type: string + type: array + repeatEvery: + description: — Frequency of backup repetition. See + interval_type for available values. + type: string + type: + description: '— Type of the scheduling. Available + values are: "HOURLY", "DAILY", "WEEKLY", "MONTHLY".' + type: string + weekdays: + description: — List of weekdays when the backup will + be applied. Used in "WEEKLY" type. 
+ items: + type: string + type: array + type: object + type: array + maxParallelBackups: + description: — Maximum number of backup processes allowed + to run in parallel. 0 for unlimited. + type: number + randomMaxDelay: + description: |- + — Configuration of the random delay between the execution of parallel tasks. + See interval_type for available values. + type: string + scheme: + description: |- + — Scheme of the backups. + Available values are: "ALWAYS_INCREMENTAL", "ALWAYS_FULL", "WEEKLY_FULL_DAILY_INCREMENTAL", 'WEEKLY_INCREMENTAL". + type: string + weeklyBackupDay: + description: |- + — A day of week to start weekly backups. + See day_type for available values. + type: string + type: object + type: array + silentModeEnabled: + description: — if true, a user interaction will be avoided when + possible. + type: boolean + splittingBytes: + description: — determines the size to split backups. It's better + to leave this option unchanged. + type: string + vmSnapshotReattempts: + description: |- + (Requied) — Amount of reattempts that should be performed while trying to make snapshot. + This attribute consists of the following parameters: + items: + properties: + enabled: + description: — Enable flag + type: boolean + interval: + description: — Retry interval. See interval_type for available + values + type: string + maxAttempts: + description: — Maximum number of attempts before throwing + an error + type: number + type: object + type: array + vssProvider: + description: |- + — Settings for the volume shadow copy service. + Available values are: "NATIVE", "TARGET_SYSTEM_DEFINED" + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + archiveName: + description: '[Plan ID]-[Unique ID]a) — The name of generated + archives.' + type: string + cbt: + description: |- + — Configuration of Changed Block Tracking. + Available values are: "USE_IF_ENABLED", "ENABLED_AND_USE", "DO_NOT_USE". + type: string + compression: + description: |- + — Archive compression level. Affects CPU. + Available values: "NORMAL", "HIGH", "MAX", "OFF". + type: string + fastBackupEnabled: + description: — Enable flag + type: boolean + folderId: + description: — days + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + format: + description: |- + — Format of the backup. It's strongly recommend to leave this option empty or "AUTO". + Available values: "AUTO", "VERSION_11", "VERSION_12". + type: string + multiVolumeSnapshottingEnabled: + description: — If true, snapshots of multiple volumes will be + taken simultaneously. + type: boolean + name: + description: — Name of the policy + type: string + performanceWindowEnabled: + description: — Time windows for performance limitations of backup. + type: boolean + preserveFileSecuritySettings: + description: — Preserves file security settings. It's better to + set this option to true. + type: boolean + quiesceSnapshottingEnabled: + description: — If true, a quiesced snapshot of the virtual machine + will be taken. 
+ type: boolean + reattempts: + description: |- + — Amount of reattempts that should be performed while trying to make backup at the host. + This attribute consists of the following parameters: + items: + properties: + enabled: + description: — Enable flag + type: boolean + interval: + description: — Retry interval. See interval_type for available + values + type: string + maxAttempts: + description: — Maximum number of attempts before throwing + an error + type: number + type: object + type: array + retention: + description: |- + — Retention policy for backups. Allows to setup backups lifecycle. + This attribute consists of the following parameters: + items: + properties: + afterBackup: + description: — Defines whether retention rule applies after + creating backup or before. + type: boolean + rules: + description: — seconds + items: + properties: + maxAge: + description: (Conflicts with max_count) — Deletes + backups that older than max_age. Exactly one of + max_count or max_age should be set. + type: string + maxCount: + description: (Conflicts with max_age) — Deletes backups + if it's count exceeds max_count. Exactly one of + max_count or max_age should be set. + type: number + repeatPeriod: + description: — days + items: + type: string + type: array + type: object + type: array + type: object + type: array + scheduling: + description: — Schedule settings for creating backups on the host. + items: + properties: + enabled: + description: — Enable flag + type: boolean + executeByInterval: + description: |- + — Perform backup by interval, since last backup of the host. Maximum value is: 9999 days. + See interval_type for available values. Exactly on of options should be set: execute_by_interval or execute_by_time. + type: number + executeByTime: + description: '— Perform backup periodically at specific + time. Exactly on of options should be set: execute_by_interval + or execute_by_time.' 
+ items: + properties: + includeLastDayOfMonth: + description: |- + — If true, schedule will be applied on the last day of month. + See day_type for available values. + type: boolean + monthdays: + description: — List of days when schedule applies. + Used in "MONTHLY" type. + items: + type: number + type: array + months: + description: — seconds + items: + type: number + type: array + repeatAt: + description: hours format), when the schedule applies. + items: + type: string + type: array + repeatEvery: + description: — Frequency of backup repetition. See + interval_type for available values. + type: string + type: + description: '— Type of the scheduling. Available + values are: "HOURLY", "DAILY", "WEEKLY", "MONTHLY".' + type: string + weekdays: + description: — List of weekdays when the backup will + be applied. Used in "WEEKLY" type. + items: + type: string + type: array + type: object + type: array + maxParallelBackups: + description: — Maximum number of backup processes allowed + to run in parallel. 0 for unlimited. + type: number + randomMaxDelay: + description: |- + — Configuration of the random delay between the execution of parallel tasks. + See interval_type for available values. + type: string + scheme: + description: |- + — Scheme of the backups. + Available values are: "ALWAYS_INCREMENTAL", "ALWAYS_FULL", "WEEKLY_FULL_DAILY_INCREMENTAL", 'WEEKLY_INCREMENTAL". + type: string + weeklyBackupDay: + description: |- + — A day of week to start weekly backups. + See day_type for available values. + type: string + type: object + type: array + silentModeEnabled: + description: — if true, a user interaction will be avoided when + possible. + type: boolean + splittingBytes: + description: — determines the size to split backups. It's better + to leave this option unchanged. + type: string + vmSnapshotReattempts: + description: |- + (Requied) — Amount of reattempts that should be performed while trying to make snapshot. 
+ This attribute consists of the following parameters: + items: + properties: + enabled: + description: — Enable flag + type: boolean + interval: + description: — Retry interval. See interval_type for available + values + type: string + maxAttempts: + description: — Maximum number of attempts before throwing + an error + type: number + type: object + type: array + vssProvider: + description: |- + — Settings for the volume shadow copy service. + Available values are: "NATIVE", "TARGET_SYSTEM_DEFINED" + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.reattempts is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.reattempts) + || (has(self.initProvider) && has(self.initProvider.reattempts))' + - message: spec.forProvider.retention is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.retention) + || (has(self.initProvider) && has(self.initProvider.retention))' + - message: spec.forProvider.scheduling is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.scheduling) + || (has(self.initProvider) && has(self.initProvider.scheduling))' + - message: spec.forProvider.vmSnapshotReattempts is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.vmSnapshotReattempts) + || (has(self.initProvider) && has(self.initProvider.vmSnapshotReattempts))' + status: + description: PolicyStatus defines the observed state of Policy. + properties: + atProvider: + properties: + archiveName: + description: '[Plan ID]-[Unique ID]a) — The name of generated + archives.' + type: string + cbt: + description: |- + — Configuration of Changed Block Tracking. 
+ Available values are: "USE_IF_ENABLED", "ENABLED_AND_USE", "DO_NOT_USE". + type: string + compression: + description: |- + — Archive compression level. Affects CPU. + Available values: "NORMAL", "HIGH", "MAX", "OFF". + type: string + createdAt: + type: string + enabled: + description: — Enable flag + type: boolean + fastBackupEnabled: + description: — Enable flag + type: boolean + folderId: + description: — days + type: string + format: + description: |- + — Format of the backup. It's strongly recommend to leave this option empty or "AUTO". + Available values: "AUTO", "VERSION_11", "VERSION_12". + type: string + id: + description: — days + type: string + multiVolumeSnapshottingEnabled: + description: — If true, snapshots of multiple volumes will be + taken simultaneously. + type: boolean + name: + description: — Name of the policy + type: string + performanceWindowEnabled: + description: — Time windows for performance limitations of backup. + type: boolean + preserveFileSecuritySettings: + description: — Preserves file security settings. It's better to + set this option to true. + type: boolean + quiesceSnapshottingEnabled: + description: — If true, a quiesced snapshot of the virtual machine + will be taken. + type: boolean + reattempts: + description: |- + — Amount of reattempts that should be performed while trying to make backup at the host. + This attribute consists of the following parameters: + items: + properties: + enabled: + description: — Enable flag + type: boolean + interval: + description: — Retry interval. See interval_type for available + values + type: string + maxAttempts: + description: — Maximum number of attempts before throwing + an error + type: number + type: object + type: array + retention: + description: |- + — Retention policy for backups. Allows to setup backups lifecycle. 
+ This attribute consists of the following parameters: + items: + properties: + afterBackup: + description: — Defines whether retention rule applies after + creating backup or before. + type: boolean + rules: + description: — seconds + items: + properties: + maxAge: + description: (Conflicts with max_count) — Deletes + backups that older than max_age. Exactly one of + max_count or max_age should be set. + type: string + maxCount: + description: (Conflicts with max_age) — Deletes backups + if it's count exceeds max_count. Exactly one of + max_count or max_age should be set. + type: number + repeatPeriod: + description: — days + items: + type: string + type: array + type: object + type: array + type: object + type: array + scheduling: + description: — Schedule settings for creating backups on the host. + items: + properties: + enabled: + description: — Enable flag + type: boolean + executeByInterval: + description: |- + — Perform backup by interval, since last backup of the host. Maximum value is: 9999 days. + See interval_type for available values. Exactly on of options should be set: execute_by_interval or execute_by_time. + type: number + executeByTime: + description: '— Perform backup periodically at specific + time. Exactly on of options should be set: execute_by_interval + or execute_by_time.' + items: + properties: + includeLastDayOfMonth: + description: |- + — If true, schedule will be applied on the last day of month. + See day_type for available values. + type: boolean + monthdays: + description: — List of days when schedule applies. + Used in "MONTHLY" type. + items: + type: number + type: array + months: + description: — seconds + items: + type: number + type: array + repeatAt: + description: hours format), when the schedule applies. + items: + type: string + type: array + repeatEvery: + description: — Frequency of backup repetition. See + interval_type for available values. + type: string + type: + description: '— Type of the scheduling. 
Available + values are: "HOURLY", "DAILY", "WEEKLY", "MONTHLY".' + type: string + weekdays: + description: — List of weekdays when the backup will + be applied. Used in "WEEKLY" type. + items: + type: string + type: array + type: object + type: array + maxParallelBackups: + description: — Maximum number of backup processes allowed + to run in parallel. 0 for unlimited. + type: number + randomMaxDelay: + description: |- + — Configuration of the random delay between the execution of parallel tasks. + See interval_type for available values. + type: string + scheme: + description: |- + — Scheme of the backups. + Available values are: "ALWAYS_INCREMENTAL", "ALWAYS_FULL", "WEEKLY_FULL_DAILY_INCREMENTAL", 'WEEKLY_INCREMENTAL". + type: string + weeklyBackupDay: + description: |- + — A day of week to start weekly backups. + See day_type for available values. + type: string + type: object + type: array + silentModeEnabled: + description: — if true, a user interaction will be avoided when + possible. + type: boolean + splittingBytes: + description: — determines the size to split backups. It's better + to leave this option unchanged. + type: string + updatedAt: + type: string + vmSnapshotReattempts: + description: |- + (Requied) — Amount of reattempts that should be performed while trying to make snapshot. + This attribute consists of the following parameters: + items: + properties: + enabled: + description: — Enable flag + type: boolean + interval: + description: — Retry interval. See interval_type for available + values + type: string + maxAttempts: + description: — Maximum number of attempts before throwing + an error + type: number + type: object + type: array + vssProvider: + description: |- + — Settings for the volume shadow copy service. + Available values are: "NATIVE", "TARGET_SYSTEM_DEFINED" + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/billing.yandex-cloud.upjet.crossplane.io_cloudbindings.yaml b/package/crds/billing.yandex-cloud.upjet.crossplane.io_cloudbindings.yaml new file mode 100644 index 0000000..de3522f --- /dev/null +++ b/package/crds/billing.yandex-cloud.upjet.crossplane.io_cloudbindings.yaml @@ -0,0 +1,353 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: cloudbindings.billing.yandex-cloud.upjet.crossplane.io +spec: + group: billing.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: CloudBinding + listKind: CloudBindingList + plural: cloudbindings + singular: cloudbinding + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: CloudBinding is the Schema for the CloudBindings API. Bind cloud + to billing account. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. 
+ Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CloudBindingSpec defines the desired state of CloudBinding + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + billingAccountId: + description: ID of billing account to bind cloud to. + type: string + cloudId: + description: ID of cloud to bind. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + billingAccountId: + description: ID of billing account to bind cloud to. + type: string + cloudId: + description: ID of cloud to bind. 
+ type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.billingAccountId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.billingAccountId) + || (has(self.initProvider) && has(self.initProvider.billingAccountId))' + - message: spec.forProvider.cloudId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.cloudId) + || (has(self.initProvider) && has(self.initProvider.cloudId))' + status: + description: CloudBindingStatus defines the observed state of CloudBinding. + properties: + atProvider: + properties: + billingAccountId: + description: ID of billing account to bind cloud to. + type: string + cloudId: + description: ID of cloud to bind. + type: string + id: + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/cdn.yandex-cloud.upjet.crossplane.io_origingroups.yaml b/package/crds/cdn.yandex-cloud.upjet.crossplane.io_origingroups.yaml new file mode 100644 index 0000000..021c8f5 --- /dev/null +++ b/package/crds/cdn.yandex-cloud.upjet.crossplane.io_origingroups.yaml @@ -0,0 +1,552 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: origingroups.cdn.yandex-cloud.upjet.crossplane.io +spec: + group: cdn.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: OriginGroup + listKind: OriginGroupList + plural: origingroups + singular: origingroup + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: OriginGroup 
is the Schema for the OriginGroups API. Allows management + of a Yandex.Cloud CDN Origin Groups. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: OriginGroupSpec defines the desired state of OriginGroup + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + folderId: + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: CDN Origin Group name used to define device. + type: string + origin: + items: + properties: + backup: + type: boolean + enabled: + type: boolean + source: + type: string + type: object + type: array + useNext: + description: If the option is active (has true value), in case + the origin responds with 4XX or 5XX codes, use the next origin + from the list. + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + folderId: + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: CDN Origin Group name used to define device. + type: string + origin: + items: + properties: + backup: + type: boolean + enabled: + type: boolean + source: + type: string + type: object + type: array + useNext: + description: If the option is active (has true value), in case + the origin responds with 4XX or 5XX codes, use the next origin + from the list. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. 
It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.origin is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.origin) + || (has(self.initProvider) && has(self.initProvider.origin))' + status: + description: OriginGroupStatus defines the observed state of OriginGroup. + properties: + atProvider: + properties: + folderId: + type: string + id: + type: string + name: + description: CDN Origin Group name used to define device. + type: string + origin: + items: + properties: + backup: + type: boolean + enabled: + type: boolean + originGroupId: + type: number + source: + type: string + type: object + type: array + useNext: + description: If the option is active (has true value), in case + the origin responds with 4XX or 5XX codes, use the next origin + from the list. + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/cdn.yandex-cloud.upjet.crossplane.io_resources.yaml b/package/crds/cdn.yandex-cloud.upjet.crossplane.io_resources.yaml new file mode 100644 index 0000000..d096a5b --- /dev/null +++ b/package/crds/cdn.yandex-cloud.upjet.crossplane.io_resources.yaml @@ -0,0 +1,1184 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: resources.cdn.yandex-cloud.upjet.crossplane.io +spec: + group: cdn.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Resource + listKind: ResourceList + plural: resources + singular: resource + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: 
.status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Resource is the Schema for the Resources API. Allows management + of a Yandex.Cloud CDN Resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ResourceSpec defines the desired state of Resource + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + active: + description: Flag to create Resource either in active or disabled + state. True - the content from CDN is available to clients. + type: boolean + cname: + description: CDN endpoint CNAME, must be unique among resources. + type: string + folderId: + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + options: + description: CDN Resource settings and options to tune CDN edge + behavior. + items: + properties: + allowedHttpMethods: + description: 'HTTP methods for your CDN content. By default + the following methods are allowed: GET, HEAD, POST, PUT, + PATCH, DELETE, OPTIONS. In case some methods are not allowed + to the user, they will get the 405 (Method Not Allowed) + response. If the method is not supported, the user gets + the 501 (Not Implemented) response.' + items: + type: string + type: array + browserCacheSettings: + description: 'set up a cache period for the end-users browser. + Content will be cached due to origin settings. If there + are no cache settings on your origin, the content will + not be cached. The list of HTTP response codes that can + be cached in browsers: 200, 201, 204, 206, 301, 302, 303, + 304, 307, 308. Other response codes will not be cached. + The default value is 4 days.' + type: number + cacheHttpHeaders: + description: list HTTP headers that must be included in + responses to clients. + items: + type: string + type: array + cors: + description: parameter that lets browsers get access to + selected resources from a domain different to a domain + from which the request is received. 
+ items: + type: string + type: array + customHostHeader: + description: custom value for the Host header. Your server + must be able to process requests with the chosen header. + type: string + customServerName: + description: wildcard additional CNAME. If a resource has + a wildcard additional CNAME, you can use your own certificate + for content delivery via HTTPS. Read-only. + type: string + disableCache: + description: setup a cache status. + type: boolean + disableProxyForceRanges: + description: disabling proxy force ranges. + type: boolean + edgeCacheSettings: + description: content will be cached according to origin + cache settings. The value applies for a response with + codes 200, 201, 204, 206, 301, 302, 303, 304, 307, 308 + if an origin server does not have caching HTTP headers. + Responses with other codes will not be cached. + type: number + enableIpUrlSigning: + description: enable access limiting by IP addresses, option + available only with setting secure_key. + type: boolean + fetchedCompressed: + description: option helps you to reduce the bandwidth between + origin and CDN servers. Also, content delivery speed becomes + higher because of reducing the time for compressing files + in a CDN. + type: boolean + forwardHostHeader: + description: choose the Forward Host header option if is + important to send in the request to the Origin the same + Host header as was sent in the request to CDN server. + type: boolean + gzipOn: + description: GZip compression at CDN servers reduces file + size by 70% and can be as high as 90%. + type: boolean + ignoreCookie: + description: set for ignoring cookie. + type: boolean + ignoreQueryParams: + description: files with different query parameters are cached + as objects with the same key regardless of the parameter + value. selected by default. 
+                          type: boolean
+                        ipAddressAcl:
+                          items:
+                            properties:
+                              exceptedValues:
+                                description: the list of specified IP addresses to
+                                  be allowed or denied depending on acl policy type.
+                                items:
+                                  type: string
+                                type: array
+                              policyType:
+                                description: the policy type for ip_address_acl option,
+                                  one of "allow" or "deny" values.
+                                type: string
+                            type: object
+                          type: array
+                        proxyCacheMethodsSet:
+                          description: allows caching for GET, HEAD and POST requests.
+                          type: boolean
+                        queryParamsBlacklist:
+                          description: files with the specified query parameters are
+                            cached as objects with the same key, files with other
+                            parameters are cached as objects with different keys.
+                          items:
+                            type: string
+                          type: array
+                        queryParamsWhitelist:
+                          description: files with the specified query parameters are
+                            cached as objects with different keys, files with other
+                            parameters are cached as objects with the same key.
+                          items:
+                            type: string
+                          type: array
+                        redirectHttpToHttps:
+                          description: set up a redirect from HTTP to HTTPS.
+                          type: boolean
+                        redirectHttpsToHttp:
+                          description: set up a redirect from HTTPS to HTTP.
+                          type: boolean
+                        secureKey:
+                          description: set secure key for url encoding to protect
+                            content and limit access by IP addresses and time limits.
+                          type: string
+                        slice:
+                          description: files larger than 10 MB will be requested and
+                            cached in parts (no larger than 10 MB each part). It reduces
+                            time to first byte. The origin must support HTTP Range
+                            requests.
+                          type: boolean
+                        staticRequestHeaders:
+                          additionalProperties:
+                            type: string
+                          description: set up custom headers that CDN servers will
+                            send in requests to origins.
+                          type: object
+                          x-kubernetes-map-type: granular
+                        staticResponseHeaders:
+                          additionalProperties:
+                            type: string
+                          description: set up custom headers that CDN servers will
+                            send in response to clients.
+ type: object + x-kubernetes-map-type: granular + type: object + type: array + originGroupId: + type: number + originGroupIdRef: + description: Reference to a OriginGroup in cdn to populate originGroupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + originGroupIdSelector: + description: Selector for a OriginGroup in cdn to populate originGroupId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + originGroupName: + type: string + originProtocol: + type: string + secondaryHostnames: + description: list of secondary hostname strings. + items: + type: string + type: array + x-kubernetes-list-type: set + sslCertificate: + description: SSL certificate of CDN resource. + items: + properties: + certificateManagerId: + type: string + type: + type: string + type: object + type: array + updatedAt: + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + active: + description: Flag to create Resource either in active or disabled + state. True - the content from CDN is available to clients. + type: boolean + cname: + description: CDN endpoint CNAME, must be unique among resources. + type: string + folderId: + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + options: + description: CDN Resource settings and options to tune CDN edge + behavior. + items: + properties: + allowedHttpMethods: + description: 'HTTP methods for your CDN content. By default + the following methods are allowed: GET, HEAD, POST, PUT, + PATCH, DELETE, OPTIONS. In case some methods are not allowed + to the user, they will get the 405 (Method Not Allowed) + response. If the method is not supported, the user gets + the 501 (Not Implemented) response.' + items: + type: string + type: array + browserCacheSettings: + description: 'set up a cache period for the end-users browser. + Content will be cached due to origin settings. If there + are no cache settings on your origin, the content will + not be cached. The list of HTTP response codes that can + be cached in browsers: 200, 201, 204, 206, 301, 302, 303, + 304, 307, 308. Other response codes will not be cached. + The default value is 4 days.' + type: number + cacheHttpHeaders: + description: list HTTP headers that must be included in + responses to clients. + items: + type: string + type: array + cors: + description: parameter that lets browsers get access to + selected resources from a domain different to a domain + from which the request is received. + items: + type: string + type: array + customHostHeader: + description: custom value for the Host header. Your server + must be able to process requests with the chosen header. + type: string + customServerName: + description: wildcard additional CNAME. If a resource has + a wildcard additional CNAME, you can use your own certificate + for content delivery via HTTPS. Read-only. + type: string + disableCache: + description: setup a cache status. + type: boolean + disableProxyForceRanges: + description: disabling proxy force ranges. + type: boolean + edgeCacheSettings: + description: content will be cached according to origin + cache settings. 
The value applies for a response with + codes 200, 201, 204, 206, 301, 302, 303, 304, 307, 308 + if an origin server does not have caching HTTP headers. + Responses with other codes will not be cached. + type: number + enableIpUrlSigning: + description: enable access limiting by IP addresses, option + available only with setting secure_key. + type: boolean + fetchedCompressed: + description: option helps you to reduce the bandwidth between + origin and CDN servers. Also, content delivery speed becomes + higher because of reducing the time for compressing files + in a CDN. + type: boolean + forwardHostHeader: + description: choose the Forward Host header option if is + important to send in the request to the Origin the same + Host header as was sent in the request to CDN server. + type: boolean + gzipOn: + description: GZip compression at CDN servers reduces file + size by 70% and can be as high as 90%. + type: boolean + ignoreCookie: + description: set for ignoring cookie. + type: boolean + ignoreQueryParams: + description: files with different query parameters are cached + as objects with the same key regardless of the parameter + value. selected by default. + type: boolean + ipAddressAcl: + items: + properties: + exceptedValues: + description: the list of specified IP addresses to + be allowed or denied depending on acl policy type. + items: + type: string + type: array + policyType: + description: the policy type for ip_address_acl option, + one of "allow" or "deny" values. + type: string + type: object + type: array + proxyCacheMethodsSet: + description: allows caching for GET, HEAD and POST requests. + type: boolean + queryParamsBlacklist: + description: files with the specified query parameters are + cached as objects with the same key, files with other + parameters are cached as objects with different keys. 
+                          items:
+                            type: string
+                          type: array
+                        queryParamsWhitelist:
+                          description: files with the specified query parameters are
+                            cached as objects with different keys, files with other
+                            parameters are cached as objects with the same key.
+                          items:
+                            type: string
+                          type: array
+                        redirectHttpToHttps:
+                          description: set up a redirect from HTTP to HTTPS.
+                          type: boolean
+                        redirectHttpsToHttp:
+                          description: set up a redirect from HTTPS to HTTP.
+                          type: boolean
+                        secureKey:
+                          description: set secure key for url encoding to protect
+                            content and limit access by IP addresses and time limits.
+                          type: string
+                        slice:
+                          description: files larger than 10 MB will be requested and
+                            cached in parts (no larger than 10 MB each part). It reduces
+                            time to first byte. The origin must support HTTP Range
+                            requests.
+                          type: boolean
+                        staticRequestHeaders:
+                          additionalProperties:
+                            type: string
+                          description: set up custom headers that CDN servers will
+                            send in requests to origins.
+                          type: object
+                          x-kubernetes-map-type: granular
+                        staticResponseHeaders:
+                          additionalProperties:
+                            type: string
+                          description: set up custom headers that CDN servers will
+                            send in response to clients.
+                          type: object
+                          x-kubernetes-map-type: granular
+                        type: object
+                      type: array
+                    originGroupId:
+                      type: number
+                    originGroupIdRef:
+                      description: Reference to a OriginGroup in cdn to populate originGroupId.
+                      properties:
+                        name:
+                          description: Name of the referenced object.
+                          type: string
+                        policy:
+                          description: Policies for referencing.
+                          properties:
+                            resolution:
+                              default: Required
+                              description: |-
+                                Resolution specifies whether resolution of this reference is required.
+                                The default is 'Required', which means the reconcile will fail if the
+                                reference cannot be resolved. 'Optional' means this reference will be
+                                a no-op if it cannot be resolved.
+                              enum:
+                              - Required
+                              - Optional
+                              type: string
+                            resolve:
+                              description: |-
+                                Resolve specifies when this reference should be resolved.
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + originGroupIdSelector: + description: Selector for a OriginGroup in cdn to populate originGroupId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + originGroupName: + type: string + originProtocol: + type: string + secondaryHostnames: + description: list of secondary hostname strings. + items: + type: string + type: array + x-kubernetes-list-type: set + sslCertificate: + description: SSL certificate of CDN resource. 
+ items: + properties: + certificateManagerId: + type: string + type: + type: string + type: object + type: array + updatedAt: + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: ResourceStatus defines the observed state of Resource. + properties: + atProvider: + properties: + active: + description: Flag to create Resource either in active or disabled + state. True - the content from CDN is available to clients. 
+ type: boolean + cname: + description: CDN endpoint CNAME, must be unique among resources. + type: string + createdAt: + description: Creation timestamp of the IoT Core Device + type: string + folderId: + type: string + id: + type: string + options: + description: CDN Resource settings and options to tune CDN edge + behavior. + items: + properties: + allowedHttpMethods: + description: 'HTTP methods for your CDN content. By default + the following methods are allowed: GET, HEAD, POST, PUT, + PATCH, DELETE, OPTIONS. In case some methods are not allowed + to the user, they will get the 405 (Method Not Allowed) + response. If the method is not supported, the user gets + the 501 (Not Implemented) response.' + items: + type: string + type: array + browserCacheSettings: + description: 'set up a cache period for the end-users browser. + Content will be cached due to origin settings. If there + are no cache settings on your origin, the content will + not be cached. The list of HTTP response codes that can + be cached in browsers: 200, 201, 204, 206, 301, 302, 303, + 304, 307, 308. Other response codes will not be cached. + The default value is 4 days.' + type: number + cacheHttpHeaders: + description: list HTTP headers that must be included in + responses to clients. + items: + type: string + type: array + cors: + description: parameter that lets browsers get access to + selected resources from a domain different to a domain + from which the request is received. + items: + type: string + type: array + customHostHeader: + description: custom value for the Host header. Your server + must be able to process requests with the chosen header. + type: string + customServerName: + description: wildcard additional CNAME. If a resource has + a wildcard additional CNAME, you can use your own certificate + for content delivery via HTTPS. Read-only. + type: string + disableCache: + description: setup a cache status. 
+ type: boolean + disableProxyForceRanges: + description: disabling proxy force ranges. + type: boolean + edgeCacheSettings: + description: content will be cached according to origin + cache settings. The value applies for a response with + codes 200, 201, 204, 206, 301, 302, 303, 304, 307, 308 + if an origin server does not have caching HTTP headers. + Responses with other codes will not be cached. + type: number + enableIpUrlSigning: + description: enable access limiting by IP addresses, option + available only with setting secure_key. + type: boolean + fetchedCompressed: + description: option helps you to reduce the bandwidth between + origin and CDN servers. Also, content delivery speed becomes + higher because of reducing the time for compressing files + in a CDN. + type: boolean + forwardHostHeader: + description: choose the Forward Host header option if is + important to send in the request to the Origin the same + Host header as was sent in the request to CDN server. + type: boolean + gzipOn: + description: GZip compression at CDN servers reduces file + size by 70% and can be as high as 90%. + type: boolean + ignoreCookie: + description: set for ignoring cookie. + type: boolean + ignoreQueryParams: + description: files with different query parameters are cached + as objects with the same key regardless of the parameter + value. selected by default. + type: boolean + ipAddressAcl: + items: + properties: + exceptedValues: + description: the list of specified IP addresses to + be allowed or denied depending on acl policy type. + items: + type: string + type: array + policyType: + description: the policy type for ip_address_acl option, + one of "allow" or "deny" values. + type: string + type: object + type: array + proxyCacheMethodsSet: + description: allows caching for GET, HEAD and POST requests. 
+ type: boolean + queryParamsBlacklist: + description: files with the specified query parameters are + cached as objects with the same key, files with other + parameters are cached as objects with different keys. + items: + type: string + type: array + queryParamsWhitelist: + description: files with the specified query parameters are + cached as objects with different keys, files with other + parameters are cached as objects with the same key. + items: + type: string + type: array + redirectHttpToHttps: + description: set up a redirect from HTTP to HTTPS. + type: boolean + redirectHttpsToHttp: + description: set up a redirect from HTTPS to HTTP. + type: boolean + secureKey: + description: set secure key for url encoding to protect + contect and limit access by IP addresses and time limits. + type: string + slice: + description: files larger than 10 MB will be requested and + cached in parts (no larger than 10 MB each part). It reduces + time to first byte. The origin must support HTTP Range + requests. + type: boolean + staticRequestHeaders: + additionalProperties: + type: string + description: set up custom headers that CDN servers will + send in requests to origins. + type: object + x-kubernetes-map-type: granular + staticResponseHeaders: + additionalProperties: + type: string + description: set up custom headers that CDN servers will + send in response to clients. + type: object + x-kubernetes-map-type: granular + type: object + type: array + originGroupId: + type: number + originGroupName: + type: string + originProtocol: + type: string + providerCname: + description: provider CNAME of CDN resource, computed value for + read and update operations. + type: string + secondaryHostnames: + description: list of secondary hostname strings. + items: + type: string + type: array + x-kubernetes-list-type: set + sslCertificate: + description: SSL certificate of CDN resource. 
+ items: + properties: + certificateManagerId: + type: string + status: + type: string + type: + type: string + type: object + type: array + updatedAt: + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/cm.yandex-cloud.upjet.crossplane.io_certificates.yaml b/package/crds/cm.yandex-cloud.upjet.crossplane.io_certificates.yaml new file mode 100644 index 0000000..97508fe --- /dev/null +++ b/package/crds/cm.yandex-cloud.upjet.crossplane.io_certificates.yaml @@ -0,0 +1,778 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: certificates.cm.yandex-cloud.upjet.crossplane.io +spec: + group: cm.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Certificate + listKind: CertificateList + plural: certificates + singular: certificate + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Certificate is the Schema for the Certificates API. A TLS certificate + signed by a certification authority confirming that it belongs to the owner + of the domain name. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CertificateSpec defines the desired state of Certificate + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + deletionProtection: + type: boolean + description: + description: Certificate description. + type: string + domains: + description: Domains for this certificate. Should be specified + for managed certificates. + items: + type: string + type: array + folderId: + description: Folder that the resource belongs to. If value is + omitted, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: Labels to assign to this certificate. + type: object + x-kubernetes-map-type: granular + managed: + description: Managed specification. Structure is documented below. + items: + properties: + challengeCount: + description: |- + . Expected number of challenge count needed to validate certificate. 
+ Resource creation will fail if the specified value does not match the actual number of challenges received from issue provider. + This argument is helpful for safe automatic resource creation for passing challenges for multi-domain certificates. + type: number + challengeType: + description: 'Domain owner-check method. Possible values:' + type: string + type: object + type: array + name: + description: Certificate name. + type: string + selfManaged: + description: Self-managed specification. Structure is documented + below. + items: + properties: + certificate: + description: Certificate with chain. + type: string + privateKeyLockboxSecret: + description: Lockbox secret specification for getting private + key. Structure is documented below. + items: + properties: + id: + description: Lockbox secret Id. + type: string + key: + description: Key of the Lockbox secret, the value + of which contains the private key of the certificate. + type: string + type: object + type: array + privateKeySecretRef: + description: Private key of certificate. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + deletionProtection: + type: boolean + description: + description: Certificate description. + type: string + domains: + description: Domains for this certificate. Should be specified + for managed certificates. + items: + type: string + type: array + folderId: + description: Folder that the resource belongs to. If value is + omitted, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: Labels to assign to this certificate. + type: object + x-kubernetes-map-type: granular + managed: + description: Managed specification. Structure is documented below. + items: + properties: + challengeCount: + description: |- + . Expected number of challenge count needed to validate certificate. + Resource creation will fail if the specified value does not match the actual number of challenges received from issue provider. + This argument is helpful for safe automatic resource creation for passing challenges for multi-domain certificates. + type: number + challengeType: + description: 'Domain owner-check method. Possible values:' + type: string + type: object + type: array + name: + description: Certificate name. + type: string + selfManaged: + description: Self-managed specification. Structure is documented + below. + items: + properties: + certificate: + description: Certificate with chain. + type: string + privateKeyLockboxSecret: + description: Lockbox secret specification for getting private + key. Structure is documented below. + items: + properties: + id: + description: Lockbox secret Id. 
+ type: string + key: + description: Key of the Lockbox secret, the value + of which contains the private key of the certificate. + type: string + type: object + type: array + privateKeySecretRef: + description: Private key of certificate. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: CertificateStatus defines the observed state of Certificate. + properties: + atProvider: + properties: + challenges: + description: Array of challenges. Structure is documented below. + items: + properties: + createdAt: + description: Time the challenge was created. + type: string + dnsName: + description: DNS record name (only for DNS challenge). + type: string + dnsType: + description: 'DNS record type: "TXT" or "CNAME" (only for + DNS challenge).' + type: string + dnsValue: + description: DNS record value (only for DNS challenge). + type: string + domain: + description: Validated domain. + type: string + httpContent: + description: The content that should be made accessible + with the given http_url (only for HTTP challenge). + type: string + httpUrl: + description: URL where the challenge content http_content + should be placed (only for HTTP challenge). + type: string + message: + description: Current status message. + type: string + type: + description: Challenge type "DNS" or "HTTP". + type: string + updatedAt: + description: Last time the challenge was updated. + type: string + type: object + type: array + createdAt: + description: Certificate create timestamp. + type: string + deletionProtection: + type: boolean + description: + description: Certificate description. + type: string + domains: + description: Domains for this certificate. Should be specified + for managed certificates. + items: + type: string + type: array + folderId: + description: Folder that the resource belongs to. 
If value is + omitted, the default provider folder is used. + type: string + id: + description: Certificate Id. + type: string + issuedAt: + description: Certificate issue timestamp. + type: string + issuer: + description: Certificate issuer. + type: string + labels: + additionalProperties: + type: string + description: Labels to assign to this certificate. + type: object + x-kubernetes-map-type: granular + managed: + description: Managed specification. Structure is documented below. + items: + properties: + challengeCount: + description: |- + . Expected number of challenge count needed to validate certificate. + Resource creation will fail if the specified value does not match the actual number of challenges received from issue provider. + This argument is helpful for safe automatic resource creation for passing challenges for multi-domain certificates. + type: number + challengeType: + description: 'Domain owner-check method. Possible values:' + type: string + type: object + type: array + name: + description: Certificate name. + type: string + notAfter: + description: Certificate end valid period. + type: string + notBefore: + description: Certificate start valid period. + type: string + selfManaged: + description: Self-managed specification. Structure is documented + below. + items: + properties: + certificate: + description: Certificate with chain. + type: string + privateKeyLockboxSecret: + description: Lockbox secret specification for getting private + key. Structure is documented below. + items: + properties: + id: + description: Lockbox secret Id. + type: string + key: + description: Key of the Lockbox secret, the value + of which contains the private key of the certificate. + type: string + type: object + type: array + type: object + type: array + serial: + description: Certificate serial number. + type: string + status: + description: 'Certificate status: "VALIDATING", "INVALID", "ISSUED", + "REVOKED", "RENEWING" or "RENEWAL_FAILED".' 
+ type: string + subject: + description: Certificate subject. + type: string + type: + description: 'Certificate type: "MANAGED" or "IMPORTED".' + type: string + updatedAt: + description: Certificate update timestamp. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/compute.yandex-cloud.upjet.crossplane.io_diskiambindings.yaml b/package/crds/compute.yandex-cloud.upjet.crossplane.io_diskiambindings.yaml new file mode 100644 index 0000000..f6c8d78 --- /dev/null +++ b/package/crds/compute.yandex-cloud.upjet.crossplane.io_diskiambindings.yaml @@ -0,0 +1,387 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: diskiambindings.compute.yandex-cloud.upjet.crossplane.io +spec: + group: compute.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: DiskIAMBinding + listKind: DiskIAMBindingList + plural: diskiambindings + singular: diskiambinding + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: DiskIAMBinding is the Schema for the DiskIAMBindings API. Allows + management of a single IAM binding for a Disk. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DiskIAMBindingSpec defines the desired state of DiskIAMBinding + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + diskId: + description: ID of the disk to attach the policy to. + type: string + members: + description: |- + An array of identities that will be granted the privilege in the role. + Each entry can have one of the following values: + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: |- + The role that should be assigned. Only one + yandex_compute_disk_iam_binding can be used per role. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + diskId: + description: ID of the disk to attach the policy to. + type: string + members: + description: |- + An array of identities that will be granted the privilege in the role. + Each entry can have one of the following values: + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: |- + The role that should be assigned. Only one + yandex_compute_disk_iam_binding can be used per role. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.diskId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.diskId) + || (has(self.initProvider) && has(self.initProvider.diskId))' + - message: spec.forProvider.members is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.members) + || (has(self.initProvider) && has(self.initProvider.members))' + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + status: + description: DiskIAMBindingStatus defines the observed state of DiskIAMBinding. + properties: + atProvider: + properties: + diskId: + description: ID of the disk to attach the policy to. + type: string + id: + type: string + members: + description: |- + An array of identities that will be granted the privilege in the role. + Each entry can have one of the following values: + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: |- + The role that should be assigned. 
Only one + yandex_compute_disk_iam_binding can be used per role. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/compute.yandex-cloud.upjet.crossplane.io_diskplacementgroupiambindings.yaml b/package/crds/compute.yandex-cloud.upjet.crossplane.io_diskplacementgroupiambindings.yaml new file mode 100644 index 0000000..01c62ab --- /dev/null +++ b/package/crds/compute.yandex-cloud.upjet.crossplane.io_diskplacementgroupiambindings.yaml @@ -0,0 +1,392 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: diskplacementgroupiambindings.compute.yandex-cloud.upjet.crossplane.io +spec: + group: compute.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: DiskPlacementGroupIAMBinding + listKind: DiskPlacementGroupIAMBindingList + plural: diskplacementgroupiambindings + singular: diskplacementgroupiambinding + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: DiskPlacementGroupIAMBinding is the Schema for the DiskPlacementGroupIAMBindings + API. Allows management of a single IAM binding for a Disk Placement Group. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DiskPlacementGroupIAMBindingSpec defines the desired state + of DiskPlacementGroupIAMBinding + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + diskPlacementGroupId: + description: ID of the disk placement group to attach the policy + to. + type: string + members: + description: |- + An array of identities that will be granted the privilege in the role. + Each entry can have one of the following values: + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: |- + The role that should be assigned. Only one + yandex_compute_disk_placement_group_iam_binding can be used per role. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + diskPlacementGroupId: + description: ID of the disk placement group to attach the policy + to. + type: string + members: + description: |- + An array of identities that will be granted the privilege in the role. + Each entry can have one of the following values: + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: |- + The role that should be assigned. Only one + yandex_compute_disk_placement_group_iam_binding can be used per role. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.diskPlacementGroupId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.diskPlacementGroupId) + || (has(self.initProvider) && has(self.initProvider.diskPlacementGroupId))' + - message: spec.forProvider.members is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.members) + || (has(self.initProvider) && has(self.initProvider.members))' + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + status: + description: DiskPlacementGroupIAMBindingStatus defines the observed state + of DiskPlacementGroupIAMBinding. 
+ properties: + atProvider: + properties: + diskPlacementGroupId: + description: ID of the disk placement group to attach the policy + to. + type: string + id: + type: string + members: + description: |- + An array of identities that will be granted the privilege in the role. + Each entry can have one of the following values: + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: |- + The role that should be assigned. Only one + yandex_compute_disk_placement_group_iam_binding can be used per role. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/compute.yandex-cloud.upjet.crossplane.io_filesystemiambindings.yaml b/package/crds/compute.yandex-cloud.upjet.crossplane.io_filesystemiambindings.yaml new file mode 100644 index 0000000..e2fdde1 --- /dev/null +++ b/package/crds/compute.yandex-cloud.upjet.crossplane.io_filesystemiambindings.yaml @@ -0,0 +1,388 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: filesystemiambindings.compute.yandex-cloud.upjet.crossplane.io +spec: + group: compute.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: FilesystemIAMBinding + listKind: FilesystemIAMBindingList + plural: filesystemiambindings + singular: filesystemiambinding + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: FilesystemIAMBinding is the Schema for the FilesystemIAMBindings + API. Allows management of a single IAM binding for a Filesystem. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FilesystemIAMBindingSpec defines the desired state of FilesystemIAMBinding + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + filesystemId: + description: ID of the filesystem to attach the policy to. + type: string + members: + description: |- + An array of identities that will be granted the privilege in the role. + Each entry can have one of the following values: + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: |- + The role that should be assigned. Only one + yandex_compute_filesystem_iam_binding can be used per role. 
+ type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + filesystemId: + description: ID of the filesystem to attach the policy to. + type: string + members: + description: |- + An array of identities that will be granted the privilege in the role. + Each entry can have one of the following values: + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: |- + The role that should be assigned. Only one + yandex_compute_filesystem_iam_binding can be used per role. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.filesystemId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.filesystemId) + || (has(self.initProvider) && has(self.initProvider.filesystemId))' + - message: spec.forProvider.members is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.members) + || (has(self.initProvider) && has(self.initProvider.members))' + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + status: + description: FilesystemIAMBindingStatus defines the observed state of + FilesystemIAMBinding. 
+ properties: + atProvider: + properties: + filesystemId: + description: ID of the filesystem to attach the policy to. + type: string + id: + type: string + members: + description: |- + An array of identities that will be granted the privilege in the role. + Each entry can have one of the following values: + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: |- + The role that should be assigned. Only one + yandex_compute_filesystem_iam_binding can be used per role. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/compute.yandex-cloud.upjet.crossplane.io_gpuclusteriambindings.yaml b/package/crds/compute.yandex-cloud.upjet.crossplane.io_gpuclusteriambindings.yaml new file mode 100644 index 0000000..a69778c --- /dev/null +++ b/package/crds/compute.yandex-cloud.upjet.crossplane.io_gpuclusteriambindings.yaml @@ -0,0 +1,388 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: gpuclusteriambindings.compute.yandex-cloud.upjet.crossplane.io +spec: + group: compute.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: GpuClusterIAMBinding + listKind: GpuClusterIAMBindingList + plural: gpuclusteriambindings + singular: gpuclusteriambinding + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: GpuClusterIAMBinding is the Schema for the GpuClusterIAMBindings + API. Allows management of a single IAM binding for a GPU Cluster. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: GpuClusterIAMBindingSpec defines the desired state of GpuClusterIAMBinding + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + gpuClusterId: + description: ID of the gpu cluster to attach the policy to. + type: string + members: + description: |- + An array of identities that will be granted the privilege in the role. + Each entry can have one of the following values: + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: |- + The role that should be assigned. Only one + yandex_compute_gpu_cluster_iam_binding can be used per role. 
+ type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + gpuClusterId: + description: ID of the gpu cluster to attach the policy to. + type: string + members: + description: |- + An array of identities that will be granted the privilege in the role. + Each entry can have one of the following values: + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: |- + The role that should be assigned. Only one + yandex_compute_gpu_cluster_iam_binding can be used per role. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.gpuClusterId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.gpuClusterId) + || (has(self.initProvider) && has(self.initProvider.gpuClusterId))' + - message: spec.forProvider.members is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.members) + || (has(self.initProvider) && has(self.initProvider.members))' + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + status: + description: GpuClusterIAMBindingStatus defines the observed state of + GpuClusterIAMBinding. 
+ properties: + atProvider: + properties: + gpuClusterId: + description: ID of the gpu cluster to attach the policy to. + type: string + id: + type: string + members: + description: |- + An array of identities that will be granted the privilege in the role. + Each entry can have one of the following values: + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: |- + The role that should be assigned. Only one + yandex_compute_gpu_cluster_iam_binding can be used per role. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/compute.yandex-cloud.upjet.crossplane.io_imageiambindings.yaml b/package/crds/compute.yandex-cloud.upjet.crossplane.io_imageiambindings.yaml new file mode 100644 index 0000000..41e0567 --- /dev/null +++ b/package/crds/compute.yandex-cloud.upjet.crossplane.io_imageiambindings.yaml @@ -0,0 +1,387 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: imageiambindings.compute.yandex-cloud.upjet.crossplane.io +spec: + group: compute.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: ImageIAMBinding + listKind: ImageIAMBindingList + plural: imageiambindings + singular: imageiambinding + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ImageIAMBinding is the Schema for the ImageIAMBindings API. Allows + management of a single IAM binding for an image. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ImageIAMBindingSpec defines the desired state of ImageIAMBinding + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + imageId: + description: ID of the image to attach the policy to. + type: string + members: + description: |- + An array of identities that will be granted the privilege in the role. + Each entry can have one of the following values: + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: |- + The role that should be assigned. Only one + yandex_compute_image_iam_binding can be used per role. 
+ type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + imageId: + description: ID of the image to attach the policy to. + type: string + members: + description: |- + An array of identities that will be granted the privilege in the role. + Each entry can have one of the following values: + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: |- + The role that should be assigned. Only one + yandex_compute_image_iam_binding can be used per role. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.imageId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.imageId) + || (has(self.initProvider) && has(self.initProvider.imageId))' + - message: spec.forProvider.members is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.members) + || (has(self.initProvider) && has(self.initProvider.members))' + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + status: + description: ImageIAMBindingStatus defines the observed state of ImageIAMBinding. 
+ properties: + atProvider: + properties: + id: + type: string + imageId: + description: ID of the image to attach the policy to. + type: string + members: + description: |- + An array of identities that will be granted the privilege in the role. + Each entry can have one of the following values: + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: |- + The role that should be assigned. Only one + yandex_compute_image_iam_binding can be used per role. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/compute.yandex-cloud.upjet.crossplane.io_instanceiambindings.yaml b/package/crds/compute.yandex-cloud.upjet.crossplane.io_instanceiambindings.yaml new file mode 100644 index 0000000..1e6224a --- /dev/null +++ b/package/crds/compute.yandex-cloud.upjet.crossplane.io_instanceiambindings.yaml @@ -0,0 +1,387 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: instanceiambindings.compute.yandex-cloud.upjet.crossplane.io +spec: + group: compute.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: InstanceIAMBinding + listKind: InstanceIAMBindingList + plural: instanceiambindings + singular: instanceiambinding + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: InstanceIAMBinding is the Schema for the InstanceIAMBindings + API. Allows management of a single IAM binding for an instance. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: InstanceIAMBindingSpec defines the desired state of InstanceIAMBinding + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + instanceId: + description: ID of the instance to attach the policy to. + type: string + members: + description: |- + An array of identities that will be granted the privilege in the role. + Each entry can have one of the following values: + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: |- + The role that should be assigned. Only one + yandex_compute_instance_iam_binding can be used per role. 
+ type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + instanceId: + description: ID of the instance to attach the policy to. + type: string + members: + description: |- + An array of identities that will be granted the privilege in the role. + Each entry can have one of the following values: + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: |- + The role that should be assigned. Only one + yandex_compute_instance_iam_binding can be used per role. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.instanceId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.instanceId) + || (has(self.initProvider) && has(self.initProvider.instanceId))' + - message: spec.forProvider.members is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.members) + || (has(self.initProvider) && has(self.initProvider.members))' + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + status: + description: InstanceIAMBindingStatus defines the observed state of InstanceIAMBinding. 
+ properties: + atProvider: + properties: + id: + type: string + instanceId: + description: ID of the instance to attach the policy to. + type: string + members: + description: |- + An array of identities that will be granted the privilege in the role. + Each entry can have one of the following values: + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: |- + The role that should be assigned. Only one + yandex_compute_instance_iam_binding can be used per role. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/compute.yandex-cloud.upjet.crossplane.io_placementgroupiambindings.yaml b/package/crds/compute.yandex-cloud.upjet.crossplane.io_placementgroupiambindings.yaml new file mode 100644 index 0000000..e49d705 --- /dev/null +++ b/package/crds/compute.yandex-cloud.upjet.crossplane.io_placementgroupiambindings.yaml @@ -0,0 +1,389 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: placementgroupiambindings.compute.yandex-cloud.upjet.crossplane.io +spec: + group: compute.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: PlacementGroupIAMBinding + listKind: PlacementGroupIAMBindingList + plural: placementgroupiambindings + singular: placementgroupiambinding + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: PlacementGroupIAMBinding is the Schema for the PlacementGroupIAMBindings + API. Allows management of a single IAM binding for a Placement Group. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PlacementGroupIAMBindingSpec defines the desired state of + PlacementGroupIAMBinding + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + members: + description: |- + An array of identities that will be granted the privilege in the role. + Each entry can have one of the following values: + items: + type: string + type: array + x-kubernetes-list-type: set + placementGroupId: + description: ID of the placement group to attach the policy to. + type: string + role: + description: |- + The role that should be assigned. 
Only one + yandex_compute_placement_group_iam_binding can be used per role. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + members: + description: |- + An array of identities that will be granted the privilege in the role. + Each entry can have one of the following values: + items: + type: string + type: array + x-kubernetes-list-type: set + placementGroupId: + description: ID of the placement group to attach the policy to. + type: string + role: + description: |- + The role that should be assigned. Only one + yandex_compute_placement_group_iam_binding can be used per role. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.members is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.members) + || (has(self.initProvider) && has(self.initProvider.members))' + - message: spec.forProvider.placementGroupId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.placementGroupId) + || (has(self.initProvider) && has(self.initProvider.placementGroupId))' + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + status: + description: PlacementGroupIAMBindingStatus defines the observed state + of PlacementGroupIAMBinding. 
+ properties: + atProvider: + properties: + id: + type: string + members: + description: |- + An array of identities that will be granted the privilege in the role. + Each entry can have one of the following values: + items: + type: string + type: array + x-kubernetes-list-type: set + placementGroupId: + description: ID of the placement group to attach the policy to. + type: string + role: + description: |- + The role that should be assigned. Only one + yandex_compute_placement_group_iam_binding can be used per role. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/compute.yandex-cloud.upjet.crossplane.io_snapshotiambindings.yaml b/package/crds/compute.yandex-cloud.upjet.crossplane.io_snapshotiambindings.yaml new file mode 100644 index 0000000..9155130 --- /dev/null +++ b/package/crds/compute.yandex-cloud.upjet.crossplane.io_snapshotiambindings.yaml @@ -0,0 +1,387 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: snapshotiambindings.compute.yandex-cloud.upjet.crossplane.io +spec: + group: compute.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: SnapshotIAMBinding + listKind: SnapshotIAMBindingList + plural: snapshotiambindings + singular: snapshotiambinding + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: SnapshotIAMBinding is the Schema for the SnapshotIAMBindings + API. Allows management of a single IAM binding for a Snapshot. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SnapshotIAMBindingSpec defines the desired state of SnapshotIAMBinding + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + members: + description: |- + An array of identities that will be granted the privilege in the role. + Each entry can have one of the following values: + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: |- + The role that should be assigned. Only one + yandex_compute_snapshot_iam_binding can be used per role. + type: string + snapshotId: + description: ID of the snapshot to attach the policy to. 
+ type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + members: + description: |- + An array of identities that will be granted the privilege in the role. + Each entry can have one of the following values: + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: |- + The role that should be assigned. Only one + yandex_compute_snapshot_iam_binding can be used per role. + type: string + snapshotId: + description: ID of the snapshot to attach the policy to. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.members is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.members) + || (has(self.initProvider) && has(self.initProvider.members))' + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + - message: spec.forProvider.snapshotId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.snapshotId) + || (has(self.initProvider) && has(self.initProvider.snapshotId))' + status: + description: SnapshotIAMBindingStatus defines the observed state of SnapshotIAMBinding. 
+ properties: + atProvider: + properties: + id: + type: string + members: + description: |- + An array of identities that will be granted the privilege in the role. + Each entry can have one of the following values: + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: |- + The role that should be assigned. Only one + yandex_compute_snapshot_iam_binding can be used per role. + type: string + snapshotId: + description: ID of the snapshot to attach the policy to. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/compute.yandex-cloud.upjet.crossplane.io_snapshotscheduleiambindings.yaml b/package/crds/compute.yandex-cloud.upjet.crossplane.io_snapshotscheduleiambindings.yaml new file mode 100644 index 0000000..9725887 --- /dev/null +++ b/package/crds/compute.yandex-cloud.upjet.crossplane.io_snapshotscheduleiambindings.yaml @@ -0,0 +1,392 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: snapshotscheduleiambindings.compute.yandex-cloud.upjet.crossplane.io +spec: + group: compute.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: SnapshotScheduleIAMBinding + listKind: SnapshotScheduleIAMBindingList + plural: snapshotscheduleiambindings + singular: snapshotscheduleiambinding + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: SnapshotScheduleIAMBinding is the Schema for the SnapshotScheduleIAMBindings + API. 
Allows management of a single IAM binding for a Snapshot Schedule. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SnapshotScheduleIAMBindingSpec defines the desired state + of SnapshotScheduleIAMBinding + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + members: + description: |- + An array of identities that will be granted the privilege in the role. + Each entry can have one of the following values: + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: |- + The role that should be assigned. Only one + yandex_compute_snapshot_schedule_iam_binding can be used per role. 
+ type: string + snapshotScheduleId: + description: ID of the snapshot schedule to attach the policy + to. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + members: + description: |- + An array of identities that will be granted the privilege in the role. + Each entry can have one of the following values: + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: |- + The role that should be assigned. Only one + yandex_compute_snapshot_schedule_iam_binding can be used per role. + type: string + snapshotScheduleId: + description: ID of the snapshot schedule to attach the policy + to. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.members is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.members) + || (has(self.initProvider) && has(self.initProvider.members))' + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + - message: spec.forProvider.snapshotScheduleId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.snapshotScheduleId) + || (has(self.initProvider) && has(self.initProvider.snapshotScheduleId))' + status: + description: SnapshotScheduleIAMBindingStatus defines the observed state + of SnapshotScheduleIAMBinding. 
+ properties: + atProvider: + properties: + id: + type: string + members: + description: |- + An array of identities that will be granted the privilege in the role. + Each entry can have one of the following values: + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: |- + The role that should be assigned. Only one + yandex_compute_snapshot_schedule_iam_binding can be used per role. + type: string + snapshotScheduleId: + description: ID of the snapshot schedule to attach the policy + to. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/dataproc.yandex-cloud.upjet.crossplane.io_clusters.yaml b/package/crds/dataproc.yandex-cloud.upjet.crossplane.io_clusters.yaml new file mode 100644 index 0000000..6ca365d --- /dev/null +++ b/package/crds/dataproc.yandex-cloud.upjet.crossplane.io_clusters.yaml @@ -0,0 +1,1567 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: clusters.dataproc.yandex-cloud.upjet.crossplane.io +spec: + group: dataproc.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Cluster + listKind: ClusterList + plural: clusters + singular: cluster + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the Clusters API. Manages a Data Proc + cluster within Yandex.Cloud. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ClusterSpec defines the desired state of Cluster + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + bucket: + description: Name of the Object Storage bucket to use for Data + Proc jobs. Data Proc Agent saves output of job driver's process + to specified bucket. In order for this to work service account + (specified by the service_account_id argument) should be given + permission to create objects within this bucket. + type: string + bucketRef: + description: Reference to a Bucket in storage to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in storage to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + clusterConfig: + description: Configuration and resources for hosts that should + be created with the cluster. The structure is documented below. + items: + properties: + hadoop: + description: Data Proc specific options. The structure is + documented below. + items: + properties: + initializationAction: + description: List of initialization scripts. The structure + is documented below. + items: + properties: + args: + description: List of arguments of the initialization + script. + items: + type: string + type: array + timeout: + description: Script execution timeout, in seconds. + type: string + uri: + description: Script URI. + type: string + type: object + type: array + properties: + additionalProperties: + type: string + description: A set of key/value pairs that are used + to configure cluster services. + type: object + x-kubernetes-map-type: granular + services: + description: List of services to run on Data Proc + cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + sshPublicKeys: + description: List of SSH public keys to put to the + hosts of the cluster. For information on how to + connect to the cluster, see the official documentation. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + subclusterSpec: + description: Configuration of the Data Proc subcluster. + The structure is documented below. + items: + properties: + assignPublicIp: + description: If true then assign public IP addresses + to the hosts of the subclusters. + type: boolean + autoscalingConfig: + description: Autoscaling configuration for compute + subclusters. + items: + properties: + cpuUtilizationTarget: + description: Defines an autoscaling rule based + on the average CPU utilization of the instance + group. If not set default autoscaling metric + will be used. 
+ type: string + decommissionTimeout: + description: Timeout to gracefully decommission + nodes during downscaling. In seconds. + type: string + maxHostsCount: + description: Maximum number of nodes in autoscaling + subclusters. + type: number + measurementDuration: + description: Time in seconds allotted for averaging + metrics. + type: string + preemptible: + description: Bool flag -- whether to use preemptible + compute instances. Preemptible instances are + stopped at least once every 24 hours, and + can be stopped at any time if their resources + are needed by Compute. For more information, + see Preemptible Virtual Machines. + type: boolean + stabilizationDuration: + description: Minimum amount of time in seconds + allotted for monitoring before Instance Groups + can reduce the number of instances in the + group. During this time, the group size doesn't + decrease, even if the new metric values indicate + that it should. + type: string + warmupDuration: + description: The warmup time of the instance + in seconds. During this time, traffic is sent + to the instance, but instance metrics are + not collected. + type: string + type: object + type: array + hostsCount: + description: Number of hosts within Data Proc subcluster. + type: number + name: + description: Name of the Data Proc subcluster. + type: string + resources: + description: Resources allocated to each host of the + Data Proc subcluster. The structure is documented + below. + items: + properties: + diskSize: + description: Volume of the storage available + to a host, in gigabytes. + type: number + diskTypeId: + description: Type of the storage of a host. + One of network-hdd (default) or network-ssd. + type: string + resourcePresetId: + description: The ID of the preset for computational + resources available to a host. All available + presets are listed in the documentation. + type: string + type: object + type: array + role: + description: Role of the subcluster in the Data Proc + cluster. 
+ type: string + subnetId: + description: The ID of the subnet, to which hosts + of the subcluster belong. Subnets of all the subclusters + must belong to the same VPC network. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + versionId: + description: Version of Data Proc image. + type: string + type: object + type: array + deletionProtection: + description: Inhibits deletion of the cluster. Can be either + true or false. + type: boolean + description: + description: Description of the Data Proc cluster. + type: string + folderId: + description: ID of the folder to create a cluster in. If it is + not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + hostGroupIds: + description: A list of host group IDs to place VMs of the cluster + on. + items: + type: string + type: array + x-kubernetes-list-type: set + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Data + Proc cluster. + type: object + x-kubernetes-map-type: granular + name: + description: Name of a specific Data Proc cluster. + type: string + securityGroupIds: + description: A list of security group IDs that the cluster belongs + to. + items: + type: string + type: array + x-kubernetes-list-type: set + serviceAccountId: + description: Service account to be used by the Data Proc agent + to access resources of Yandex.Cloud. Selected service account + should have mdb.dataproc.agent role on the folder where the + Data Proc cluster will be located. 
+ type: string + serviceAccountIdRef: + description: Reference to a ServiceAccount in iam to populate + serviceAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccountIdSelector: + description: Selector for a ServiceAccount in iam to populate + serviceAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + uiProxy: + description: Whether to enable UI Proxy feature. + type: boolean + zoneId: + description: ID of the availability zone to create cluster in. + If it is not provided, the default provider zone is used. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + bucket: + description: Name of the Object Storage bucket to use for Data + Proc jobs. Data Proc Agent saves output of job driver's process + to specified bucket. In order for this to work service account + (specified by the service_account_id argument) should be given + permission to create objects within this bucket. + type: string + bucketRef: + description: Reference to a Bucket in storage to populate bucket. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + bucketSelector: + description: Selector for a Bucket in storage to populate bucket. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + clusterConfig: + description: Configuration and resources for hosts that should + be created with the cluster. The structure is documented below. + items: + properties: + hadoop: + description: Data Proc specific options. The structure is + documented below. 
+ items: + properties: + initializationAction: + description: List of initialization scripts. The structure + is documented below. + items: + properties: + args: + description: List of arguments of the initialization + script. + items: + type: string + type: array + timeout: + description: Script execution timeout, in seconds. + type: string + uri: + description: Script URI. + type: string + type: object + type: array + properties: + additionalProperties: + type: string + description: A set of key/value pairs that are used + to configure cluster services. + type: object + x-kubernetes-map-type: granular + services: + description: List of services to run on Data Proc + cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + sshPublicKeys: + description: List of SSH public keys to put to the + hosts of the cluster. For information on how to + connect to the cluster, see the official documentation. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + subclusterSpec: + description: Configuration of the Data Proc subcluster. + The structure is documented below. + items: + properties: + assignPublicIp: + description: If true then assign public IP addresses + to the hosts of the subclusters. + type: boolean + autoscalingConfig: + description: Autoscaling configuration for compute + subclusters. + items: + properties: + cpuUtilizationTarget: + description: Defines an autoscaling rule based + on the average CPU utilization of the instance + group. If not set default autoscaling metric + will be used. + type: string + decommissionTimeout: + description: Timeout to gracefully decommission + nodes during downscaling. In seconds. + type: string + maxHostsCount: + description: Maximum number of nodes in autoscaling + subclusters. + type: number + measurementDuration: + description: Time in seconds allotted for averaging + metrics. 
+ type: string + preemptible: + description: Bool flag -- whether to use preemptible + compute instances. Preemptible instances are + stopped at least once every 24 hours, and + can be stopped at any time if their resources + are needed by Compute. For more information, + see Preemptible Virtual Machines. + type: boolean + stabilizationDuration: + description: Minimum amount of time in seconds + allotted for monitoring before Instance Groups + can reduce the number of instances in the + group. During this time, the group size doesn't + decrease, even if the new metric values indicate + that it should. + type: string + warmupDuration: + description: The warmup time of the instance + in seconds. During this time, traffic is sent + to the instance, but instance metrics are + not collected. + type: string + type: object + type: array + hostsCount: + description: Number of hosts within Data Proc subcluster. + type: number + name: + description: Name of the Data Proc subcluster. + type: string + resources: + description: Resources allocated to each host of the + Data Proc subcluster. The structure is documented + below. + items: + properties: + diskSize: + description: Volume of the storage available + to a host, in gigabytes. + type: number + diskTypeId: + description: Type of the storage of a host. + One of network-hdd (default) or network-ssd. + type: string + resourcePresetId: + description: The ID of the preset for computational + resources available to a host. All available + presets are listed in the documentation. + type: string + type: object + type: array + role: + description: Role of the subcluster in the Data Proc + cluster. + type: string + subnetId: + description: The ID of the subnet, to which hosts + of the subcluster belong. Subnets of all the subclusters + must belong to the same VPC network. + type: string + subnetIdRef: + description: Reference to a Subnet in vpc to populate + subnetId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in vpc to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + versionId: + description: Version of Data Proc image. + type: string + type: object + type: array + deletionProtection: + description: Inhibits deletion of the cluster. Can be either + true or false. + type: boolean + description: + description: Description of the Data Proc cluster. + type: string + folderId: + description: ID of the folder to create a cluster in. If it is + not provided, the default provider folder is used. + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + hostGroupIds: + description: A list of host group IDs to place VMs of the cluster + on. + items: + type: string + type: array + x-kubernetes-list-type: set + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Data + Proc cluster. + type: object + x-kubernetes-map-type: granular + name: + description: Name of a specific Data Proc cluster. + type: string + securityGroupIds: + description: A list of security group IDs that the cluster belongs + to. + items: + type: string + type: array + x-kubernetes-list-type: set + serviceAccountId: + description: Service account to be used by the Data Proc agent + to access resources of Yandex.Cloud. Selected service account + should have mdb.dataproc.agent role on the folder where the + Data Proc cluster will be located. + type: string + serviceAccountIdRef: + description: Reference to a ServiceAccount in iam to populate + serviceAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccountIdSelector: + description: Selector for a ServiceAccount in iam to populate + serviceAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + uiProxy: + description: Whether to enable UI Proxy feature. + type: boolean + zoneId: + description: ID of the availability zone to create cluster in. + If it is not provided, the default provider zone is used. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.clusterConfig is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.clusterConfig) + || (has(self.initProvider) && has(self.initProvider.clusterConfig))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: ClusterStatus defines the observed state of Cluster. + properties: + atProvider: + properties: + bucket: + description: Name of the Object Storage bucket to use for Data + Proc jobs. Data Proc Agent saves output of job driver's process + to specified bucket. In order for this to work service account + (specified by the service_account_id argument) should be given + permission to create objects within this bucket. + type: string + clusterConfig: + description: Configuration and resources for hosts that should + be created with the cluster. The structure is documented below. + items: + properties: + hadoop: + description: Data Proc specific options. The structure is + documented below. + items: + properties: + initializationAction: + description: List of initialization scripts. The structure + is documented below. + items: + properties: + args: + description: List of arguments of the initialization + script. + items: + type: string + type: array + timeout: + description: Script execution timeout, in seconds. + type: string + uri: + description: Script URI. 
+ type: string + type: object + type: array + properties: + additionalProperties: + type: string + description: A set of key/value pairs that are used + to configure cluster services. + type: object + x-kubernetes-map-type: granular + services: + description: List of services to run on Data Proc + cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + sshPublicKeys: + description: List of SSH public keys to put to the + hosts of the cluster. For information on how to + connect to the cluster, see the official documentation. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + subclusterSpec: + description: Configuration of the Data Proc subcluster. + The structure is documented below. + items: + properties: + assignPublicIp: + description: If true then assign public IP addresses + to the hosts of the subclusters. + type: boolean + autoscalingConfig: + description: Autoscaling configuration for compute + subclusters. + items: + properties: + cpuUtilizationTarget: + description: Defines an autoscaling rule based + on the average CPU utilization of the instance + group. If not set default autoscaling metric + will be used. + type: string + decommissionTimeout: + description: Timeout to gracefully decommission + nodes during downscaling. In seconds. + type: string + maxHostsCount: + description: Maximum number of nodes in autoscaling + subclusters. + type: number + measurementDuration: + description: Time in seconds allotted for averaging + metrics. + type: string + preemptible: + description: Bool flag -- whether to use preemptible + compute instances. Preemptible instances are + stopped at least once every 24 hours, and + can be stopped at any time if their resources + are needed by Compute. For more information, + see Preemptible Virtual Machines. 
+ type: boolean + stabilizationDuration: + description: Minimum amount of time in seconds + allotted for monitoring before Instance Groups + can reduce the number of instances in the + group. During this time, the group size doesn't + decrease, even if the new metric values indicate + that it should. + type: string + warmupDuration: + description: The warmup time of the instance + in seconds. During this time, traffic is sent + to the instance, but instance metrics are + not collected. + type: string + type: object + type: array + hostsCount: + description: Number of hosts within Data Proc subcluster. + type: number + id: + description: (Computed) ID of a new Data Proc cluster. + type: string + name: + description: Name of the Data Proc subcluster. + type: string + resources: + description: Resources allocated to each host of the + Data Proc subcluster. The structure is documented + below. + items: + properties: + diskSize: + description: Volume of the storage available + to a host, in gigabytes. + type: number + diskTypeId: + description: Type of the storage of a host. + One of network-hdd (default) or network-ssd. + type: string + resourcePresetId: + description: The ID of the preset for computational + resources available to a host. All available + presets are listed in the documentation. + type: string + type: object + type: array + role: + description: Role of the subcluster in the Data Proc + cluster. + type: string + subnetId: + description: The ID of the subnet, to which hosts + of the subcluster belong. Subnets of all the subclusters + must belong to the same VPC network. + type: string + type: object + type: array + versionId: + description: Version of Data Proc image. + type: string + type: object + type: array + createdAt: + description: (Computed) The Data Proc cluster creation timestamp. + type: string + deletionProtection: + description: Inhibits deletion of the cluster. Can be either + true or false. 
+ type: boolean + description: + description: Description of the Data Proc cluster. + type: string + folderId: + description: ID of the folder to create a cluster in. If it is + not provided, the default provider folder is used. + type: string + hostGroupIds: + description: A list of host group IDs to place VMs of the cluster + on. + items: + type: string + type: array + x-kubernetes-list-type: set + id: + description: (Computed) ID of a new Data Proc cluster. + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Data + Proc cluster. + type: object + x-kubernetes-map-type: granular + name: + description: Name of a specific Data Proc cluster. + type: string + securityGroupIds: + description: A list of security group IDs that the cluster belongs + to. + items: + type: string + type: array + x-kubernetes-list-type: set + serviceAccountId: + description: Service account to be used by the Data Proc agent + to access resources of Yandex.Cloud. Selected service account + should have mdb.dataproc.agent role on the folder where the + Data Proc cluster will be located. + type: string + uiProxy: + description: Whether to enable UI Proxy feature. + type: boolean + zoneId: + description: ID of the availability zone to create cluster in. + If it is not provided, the default provider zone is used. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/function.yandex-cloud.upjet.crossplane.io_iambindings.yaml b/package/crds/function.yandex-cloud.upjet.crossplane.io_iambindings.yaml new file mode 100644 index 0000000..28a5c97 --- /dev/null +++ b/package/crds/function.yandex-cloud.upjet.crossplane.io_iambindings.yaml @@ -0,0 +1,387 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: iambindings.function.yandex-cloud.upjet.crossplane.io +spec: + group: function.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: IAMBinding + listKind: IAMBindingList + plural: iambindings + singular: iambinding + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: IAMBinding is the Schema for the IAMBindings API. Allows management + of a single IAM binding for a + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. 
+ Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: IAMBindingSpec defines the desired state of IAMBinding + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + functionId: + description: The Yandex Cloud Function ID to apply a binding to. + type: string + members: + description: |- + Identities that will be granted the privilege in role. + Each entry can have one of the following values: + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be applied. See roles + type: string + sleepAfter: + type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + functionId: + description: The Yandex Cloud Function ID to apply a binding to. + type: string + members: + description: |- + Identities that will be granted the privilege in role. + Each entry can have one of the following values: + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be applied. See roles + type: string + sleepAfter: + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.functionId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.functionId) + || (has(self.initProvider) && has(self.initProvider.functionId))' + - message: spec.forProvider.members is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.members) + || (has(self.initProvider) && has(self.initProvider.members))' + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + status: + description: IAMBindingStatus defines the observed state of IAMBinding. + properties: + atProvider: + properties: + functionId: + description: The Yandex Cloud Function ID to apply a binding to. + type: string + id: + type: string + members: + description: |- + Identities that will be granted the privilege in role. + Each entry can have one of the following values: + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be applied. See roles + type: string + sleepAfter: + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/function.yandex-cloud.upjet.crossplane.io_scalingpolicies.yaml b/package/crds/function.yandex-cloud.upjet.crossplane.io_scalingpolicies.yaml new file mode 100644 index 0000000..c810623 --- /dev/null +++ b/package/crds/function.yandex-cloud.upjet.crossplane.io_scalingpolicies.yaml @@ -0,0 +1,397 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: scalingpolicies.function.yandex-cloud.upjet.crossplane.io +spec: + group: function.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: ScalingPolicy + listKind: ScalingPolicyList + plural: scalingpolicies + singular: scalingpolicy + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ScalingPolicy is the Schema for the ScalingPolicys API. Allows + management of a Yandex Cloud Function Scaling Policy. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ScalingPolicySpec defines the desired state of ScalingPolicy + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + functionId: + description: Yandex Cloud Function id used to define function + type: string + policy: + description: list definition for Yandex Cloud Function scaling + policies + items: + properties: + tag: + description: Yandex.Cloud Function version tag for Yandex + Cloud Function scaling policy + type: string + zoneInstancesLimit: + description: max number of instances in one zone for Yandex.Cloud + Function with tag + type: number + zoneRequestsLimit: + description: max number of requests in one zone for Yandex.Cloud + Function with tag + type: number + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + functionId: + description: Yandex Cloud Function id used to define function + type: string + policy: + description: list definition for Yandex Cloud Function scaling + policies + items: + properties: + tag: + description: Yandex.Cloud Function version tag for Yandex + Cloud Function scaling policy + type: string + zoneInstancesLimit: + description: max number of instances in one zone for Yandex.Cloud + Function with tag + type: number + zoneRequestsLimit: + description: max number of requests in one zone for Yandex.Cloud + Function with tag + type: number + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.functionId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.functionId) + || (has(self.initProvider) && has(self.initProvider.functionId))' + status: + description: ScalingPolicyStatus defines the observed state of ScalingPolicy. 
+ properties: + atProvider: + properties: + functionId: + description: Yandex Cloud Function id used to define function + type: string + id: + type: string + policy: + description: list definition for Yandex Cloud Function scaling + policies + items: + properties: + tag: + description: Yandex.Cloud Function version tag for Yandex + Cloud Function scaling policy + type: string + zoneInstancesLimit: + description: max number of instances in one zone for Yandex.Cloud + Function with tag + type: number + zoneRequestsLimit: + description: max number of requests in one zone for Yandex.Cloud + Function with tag + type: number + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/function.yandex-cloud.upjet.crossplane.io_triggers.yaml b/package/crds/function.yandex-cloud.upjet.crossplane.io_triggers.yaml new file mode 100644 index 0000000..6b7cbeb --- /dev/null +++ b/package/crds/function.yandex-cloud.upjet.crossplane.io_triggers.yaml @@ -0,0 +1,1506 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: triggers.function.yandex-cloud.upjet.crossplane.io +spec: + group: function.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Trigger + listKind: TriggerList + plural: triggers + singular: trigger + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Trigger is the Schema for the Triggers API. Allows management + of a Yandex Cloud Functions Trigger. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TriggerSpec defines the desired state of Trigger + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + container: + items: + properties: + id: + description: Yandex.Cloud Function ID for Yandex Cloud Functions + Trigger + type: string + path: + description: Path for Yandex.Cloud Serverless Container + for Yandex Cloud Functions Trigger + type: string + retryAttempts: + description: Retry attempts for Yandex.Cloud Function for + Yandex Cloud Functions Trigger + type: string + retryInterval: + description: Retry interval in seconds for Yandex.Cloud + Function for Yandex Cloud Functions Trigger + type: string + serviceAccountId: + description: Message Queue Service Account ID for Yandex + Cloud Functions Trigger + type: string + type: object + type: array + containerRegistry: + description: Container Registry settings definition for Yandex + Cloud Functions Trigger, if present + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + createImage: + description: Boolean flag for setting create image event + for Yandex Cloud Functions Trigger + type: boolean + createImageTag: + description: Boolean flag for setting create image tag event + for Yandex Cloud Functions Trigger + type: boolean + deleteImage: + description: Boolean flag for setting delete image event + for Yandex Cloud Functions Trigger + type: boolean + deleteImageTag: + description: Boolean flag for setting delete image tag event + for Yandex Cloud Functions Trigger + type: boolean + imageName: + description: Image name filter setting for Yandex Cloud + Functions Trigger + type: string + registryId: + description: IoT Registry ID for Yandex Cloud Functions + Trigger + type: string + tag: 
+ description: Tag for Yandex.Cloud Function for Yandex Cloud + Functions Trigger + type: string + type: object + type: array + dataStreams: + description: Data Streams settings definition for Yandex Cloud + Functions Trigger, if present + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + database: + description: Stream database for Yandex Cloud Functions + Trigger + type: string + serviceAccountId: + description: Message Queue Service Account ID for Yandex + Cloud Functions Trigger + type: string + streamName: + description: Stream name for Yandex Cloud Functions Trigger + type: string + type: object + type: array + description: + description: Description of the Yandex Cloud Functions Trigger + type: string + dlq: + description: Dead Letter Queue settings definition for Yandex + Cloud Functions Trigger + items: + properties: + queueId: + description: Message Queue ID for Yandex Cloud Functions + Trigger + type: string + serviceAccountId: + description: Message Queue Service Account ID for Yandex + Cloud Functions Trigger + type: string + type: object + type: array + folderId: + description: Folder ID for the Yandex Cloud Functions Trigger + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + function: + description: Yandex.Cloud Function settings definition for Yandex + Cloud Functions Trigger + items: + properties: + id: + description: Yandex.Cloud Function ID for Yandex Cloud Functions + Trigger + type: string + retryAttempts: + description: Retry attempts for Yandex.Cloud Function for + Yandex Cloud Functions Trigger + type: string + retryInterval: + description: Retry interval in seconds for Yandex.Cloud + Function for Yandex Cloud Functions Trigger + type: string + serviceAccountId: + description: Message Queue Service Account ID for Yandex + Cloud Functions Trigger + type: string + tag: + description: Tag for Yandex.Cloud Function for Yandex Cloud + Functions Trigger + type: string + type: object + type: array + iot: + description: IoT settings definition for Yandex Cloud Functions + Trigger, if present. Only one section iot or message_queue or + object_storage or timer can be defined. + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + deviceId: + description: IoT Device ID for Yandex Cloud Functions Trigger + type: string + registryId: + description: IoT Registry ID for Yandex Cloud Functions + Trigger + type: string + topic: + description: IoT Topic for Yandex Cloud Functions Trigger + type: string + type: object + type: array + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Yandex + Cloud Functions Trigger + type: object + x-kubernetes-map-type: granular + logGroup: + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + logGroupIds: + items: + type: string + type: 
array + x-kubernetes-list-type: set + type: object + type: array + logging: + description: Logging settings definition for Yandex Cloud Functions + Trigger, if present + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + groupId: + description: Logging group ID for Yandex Cloud Functions + Trigger + type: string + levels: + description: Logging level filter setting for Yandex Cloud + Functions Trigger + items: + type: string + type: array + x-kubernetes-list-type: set + resourceIds: + description: Resource ID filter setting for Yandex Cloud + Functions Trigger + items: + type: string + type: array + x-kubernetes-list-type: set + resourceTypes: + description: Resource type filter setting for Yandex Cloud + Functions Trigger + items: + type: string + type: array + x-kubernetes-list-type: set + streamNames: + description: Logging stream name filter setting for Yandex + Cloud Functions Trigger + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + mail: + description: Logging settings definition for Yandex Cloud Functions + Trigger, if present + items: + properties: + attachmentsBucketId: + description: Object Storage Bucket ID for Yandex Cloud Functions + Trigger + type: string + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + serviceAccountId: + description: Message Queue Service Account ID for Yandex + Cloud Functions Trigger + type: string + type: object + type: array + messageQueue: + description: Message Queue settings definition for Yandex Cloud + Functions Trigger, if present + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + 
batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + queueId: + description: Message Queue ID for Yandex Cloud Functions + Trigger + type: string + serviceAccountId: + description: Message Queue Service Account ID for Yandex + Cloud Functions Trigger + type: string + visibilityTimeout: + description: Visibility timeout for Yandex Cloud Functions + Trigger + type: string + type: object + type: array + name: + description: Yandex Cloud Functions Trigger name used to define + trigger + type: string + objectStorage: + description: Object Storage settings definition for Yandex Cloud + Functions Trigger, if present + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + bucketId: + description: Object Storage Bucket ID for Yandex Cloud Functions + Trigger + type: string + create: + description: Boolean flag for setting create event for Yandex + Cloud Functions Trigger + type: boolean + delete: + description: Boolean flag for setting delete event for Yandex + Cloud Functions Trigger + type: boolean + prefix: + description: Prefix for Object Storage for Yandex Cloud + Functions Trigger + type: string + suffix: + description: Suffix for Object Storage for Yandex Cloud + Functions Trigger + type: string + update: + description: Boolean flag for setting update event for Yandex + Cloud Functions Trigger + type: boolean + type: object + type: array + timer: + description: Timer settings definition for Yandex Cloud Functions + Trigger, if present + items: + properties: + cronExpression: + description: Cron expression for timer for Yandex Cloud + Functions Trigger + type: string + payload: + description: Payload to be passed to function + type: string + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. 
It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + container: + items: + properties: + id: + description: Yandex.Cloud Function ID for Yandex Cloud Functions + Trigger + type: string + path: + description: Path for Yandex.Cloud Serverless Container + for Yandex Cloud Functions Trigger + type: string + retryAttempts: + description: Retry attempts for Yandex.Cloud Function for + Yandex Cloud Functions Trigger + type: string + retryInterval: + description: Retry interval in seconds for Yandex.Cloud + Function for Yandex Cloud Functions Trigger + type: string + serviceAccountId: + description: Message Queue Service Account ID for Yandex + Cloud Functions Trigger + type: string + type: object + type: array + containerRegistry: + description: Container Registry settings definition for Yandex + Cloud Functions Trigger, if present + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + createImage: + description: Boolean flag for setting create image event + for Yandex Cloud Functions Trigger + type: boolean + createImageTag: + description: Boolean flag for setting create image tag event + for Yandex Cloud Functions Trigger + type: boolean + deleteImage: + description: Boolean flag for setting delete image event + for Yandex Cloud Functions 
Trigger + type: boolean + deleteImageTag: + description: Boolean flag for setting delete image tag event + for Yandex Cloud Functions Trigger + type: boolean + imageName: + description: Image name filter setting for Yandex Cloud + Functions Trigger + type: string + registryId: + description: IoT Registry ID for Yandex Cloud Functions + Trigger + type: string + tag: + description: Tag for Yandex.Cloud Function for Yandex Cloud + Functions Trigger + type: string + type: object + type: array + dataStreams: + description: Data Streams settings definition for Yandex Cloud + Functions Trigger, if present + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + database: + description: Stream database for Yandex Cloud Functions + Trigger + type: string + serviceAccountId: + description: Message Queue Service Account ID for Yandex + Cloud Functions Trigger + type: string + streamName: + description: Stream name for Yandex Cloud Functions Trigger + type: string + type: object + type: array + description: + description: Description of the Yandex Cloud Functions Trigger + type: string + dlq: + description: Dead Letter Queue settings definition for Yandex + Cloud Functions Trigger + items: + properties: + queueId: + description: Message Queue ID for Yandex Cloud Functions + Trigger + type: string + serviceAccountId: + description: Message Queue Service Account ID for Yandex + Cloud Functions Trigger + type: string + type: object + type: array + folderId: + description: Folder ID for the Yandex Cloud Functions Trigger + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + function: + description: Yandex.Cloud Function settings definition for Yandex + Cloud Functions Trigger + items: + properties: + id: + description: Yandex.Cloud Function ID for Yandex Cloud Functions + Trigger + type: string + retryAttempts: + description: Retry attempts for Yandex.Cloud Function for + Yandex Cloud Functions Trigger + type: string + retryInterval: + description: Retry interval in seconds for Yandex.Cloud + Function for Yandex Cloud Functions Trigger + type: string + serviceAccountId: + description: Message Queue Service Account ID for Yandex + Cloud Functions Trigger + type: string + tag: + description: Tag for Yandex.Cloud Function for Yandex Cloud + Functions Trigger + type: string + type: object + type: array + iot: + description: IoT settings definition for Yandex Cloud Functions + Trigger, if present. Only one section iot or message_queue or + object_storage or timer can be defined. + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + deviceId: + description: IoT Device ID for Yandex Cloud Functions Trigger + type: string + registryId: + description: IoT Registry ID for Yandex Cloud Functions + Trigger + type: string + topic: + description: IoT Topic for Yandex Cloud Functions Trigger + type: string + type: object + type: array + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Yandex + Cloud Functions Trigger + type: object + x-kubernetes-map-type: granular + logGroup: + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + logGroupIds: + items: + type: string + type: 
array + x-kubernetes-list-type: set + type: object + type: array + logging: + description: Logging settings definition for Yandex Cloud Functions + Trigger, if present + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + groupId: + description: Logging group ID for Yandex Cloud Functions + Trigger + type: string + levels: + description: Logging level filter setting for Yandex Cloud + Functions Trigger + items: + type: string + type: array + x-kubernetes-list-type: set + resourceIds: + description: Resource ID filter setting for Yandex Cloud + Functions Trigger + items: + type: string + type: array + x-kubernetes-list-type: set + resourceTypes: + description: Resource type filter setting for Yandex Cloud + Functions Trigger + items: + type: string + type: array + x-kubernetes-list-type: set + streamNames: + description: Logging stream name filter setting for Yandex + Cloud Functions Trigger + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + mail: + description: Logging settings definition for Yandex Cloud Functions + Trigger, if present + items: + properties: + attachmentsBucketId: + description: Object Storage Bucket ID for Yandex Cloud Functions + Trigger + type: string + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + serviceAccountId: + description: Message Queue Service Account ID for Yandex + Cloud Functions Trigger + type: string + type: object + type: array + messageQueue: + description: Message Queue settings definition for Yandex Cloud + Functions Trigger, if present + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + 
batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + queueId: + description: Message Queue ID for Yandex Cloud Functions + Trigger + type: string + serviceAccountId: + description: Message Queue Service Account ID for Yandex + Cloud Functions Trigger + type: string + visibilityTimeout: + description: Visibility timeout for Yandex Cloud Functions + Trigger + type: string + type: object + type: array + name: + description: Yandex Cloud Functions Trigger name used to define + trigger + type: string + objectStorage: + description: Object Storage settings definition for Yandex Cloud + Functions Trigger, if present + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + bucketId: + description: Object Storage Bucket ID for Yandex Cloud Functions + Trigger + type: string + create: + description: Boolean flag for setting create event for Yandex + Cloud Functions Trigger + type: boolean + delete: + description: Boolean flag for setting delete event for Yandex + Cloud Functions Trigger + type: boolean + prefix: + description: Prefix for Object Storage for Yandex Cloud + Functions Trigger + type: string + suffix: + description: Suffix for Object Storage for Yandex Cloud + Functions Trigger + type: string + update: + description: Boolean flag for setting update event for Yandex + Cloud Functions Trigger + type: boolean + type: object + type: array + timer: + description: Timer settings definition for Yandex Cloud Functions + Trigger, if present + items: + properties: + cronExpression: + description: Cron expression for timer for Yandex Cloud + Functions Trigger + type: string + payload: + description: Payload to be passed to function + type: string + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. 
It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: TriggerStatus defines the observed state of Trigger. 
+ properties: + atProvider: + properties: + container: + items: + properties: + id: + description: Yandex.Cloud Function ID for Yandex Cloud Functions + Trigger + type: string + path: + description: Path for Yandex.Cloud Serverless Container + for Yandex Cloud Functions Trigger + type: string + retryAttempts: + description: Retry attempts for Yandex.Cloud Function for + Yandex Cloud Functions Trigger + type: string + retryInterval: + description: Retry interval in seconds for Yandex.Cloud + Function for Yandex Cloud Functions Trigger + type: string + serviceAccountId: + description: Message Queue Service Account ID for Yandex + Cloud Functions Trigger + type: string + type: object + type: array + containerRegistry: + description: Container Registry settings definition for Yandex + Cloud Functions Trigger, if present + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + createImage: + description: Boolean flag for setting create image event + for Yandex Cloud Functions Trigger + type: boolean + createImageTag: + description: Boolean flag for setting create image tag event + for Yandex Cloud Functions Trigger + type: boolean + deleteImage: + description: Boolean flag for setting delete image event + for Yandex Cloud Functions Trigger + type: boolean + deleteImageTag: + description: Boolean flag for setting delete image tag event + for Yandex Cloud Functions Trigger + type: boolean + imageName: + description: Image name filter setting for Yandex Cloud + Functions Trigger + type: string + registryId: + description: IoT Registry ID for Yandex Cloud Functions + Trigger + type: string + tag: + description: Tag for Yandex.Cloud Function for Yandex Cloud + Functions Trigger + type: string + type: object + type: array + createdAt: + description: Creation timestamp of the Yandex Cloud Functions + Trigger + 
type: string + dataStreams: + description: Data Streams settings definition for Yandex Cloud + Functions Trigger, if present + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + database: + description: Stream database for Yandex Cloud Functions + Trigger + type: string + serviceAccountId: + description: Message Queue Service Account ID for Yandex + Cloud Functions Trigger + type: string + streamName: + description: Stream name for Yandex Cloud Functions Trigger + type: string + type: object + type: array + description: + description: Description of the Yandex Cloud Functions Trigger + type: string + dlq: + description: Dead Letter Queue settings definition for Yandex + Cloud Functions Trigger + items: + properties: + queueId: + description: Message Queue ID for Yandex Cloud Functions + Trigger + type: string + serviceAccountId: + description: Message Queue Service Account ID for Yandex + Cloud Functions Trigger + type: string + type: object + type: array + folderId: + description: Folder ID for the Yandex Cloud Functions Trigger + type: string + function: + description: Yandex.Cloud Function settings definition for Yandex + Cloud Functions Trigger + items: + properties: + id: + description: Yandex.Cloud Function ID for Yandex Cloud Functions + Trigger + type: string + retryAttempts: + description: Retry attempts for Yandex.Cloud Function for + Yandex Cloud Functions Trigger + type: string + retryInterval: + description: Retry interval in seconds for Yandex.Cloud + Function for Yandex Cloud Functions Trigger + type: string + serviceAccountId: + description: Message Queue Service Account ID for Yandex + Cloud Functions Trigger + type: string + tag: + description: Tag for Yandex.Cloud Function for Yandex Cloud + Functions Trigger + type: string + type: object + type: array + id: + description: 
Yandex.Cloud Function ID for Yandex Cloud Functions + Trigger + type: string + iot: + description: IoT settings definition for Yandex Cloud Functions + Trigger, if present. Only one section iot or message_queue or + object_storage or timer can be defined. + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + deviceId: + description: IoT Device ID for Yandex Cloud Functions Trigger + type: string + registryId: + description: IoT Registry ID for Yandex Cloud Functions + Trigger + type: string + topic: + description: IoT Topic for Yandex Cloud Functions Trigger + type: string + type: object + type: array + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Yandex + Cloud Functions Trigger + type: object + x-kubernetes-map-type: granular + logGroup: + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + logGroupIds: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + logging: + description: Logging settings definition for Yandex Cloud Functions + Trigger, if present + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + groupId: + description: Logging group ID for Yandex Cloud Functions + Trigger + type: string + levels: + description: Logging level filter setting for Yandex Cloud + Functions Trigger + items: + type: string + type: array + x-kubernetes-list-type: set + resourceIds: + description: Resource ID filter setting for Yandex Cloud + Functions Trigger + items: + 
type: string + type: array + x-kubernetes-list-type: set + resourceTypes: + description: Resource type filter setting for Yandex Cloud + Functions Trigger + items: + type: string + type: array + x-kubernetes-list-type: set + streamNames: + description: Logging stream name filter setting for Yandex + Cloud Functions Trigger + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + mail: + description: Logging settings definition for Yandex Cloud Functions + Trigger, if present + items: + properties: + attachmentsBucketId: + description: Object Storage Bucket ID for Yandex Cloud Functions + Trigger + type: string + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + serviceAccountId: + description: Message Queue Service Account ID for Yandex + Cloud Functions Trigger + type: string + type: object + type: array + messageQueue: + description: Message Queue settings definition for Yandex Cloud + Functions Trigger, if present + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + queueId: + description: Message Queue ID for Yandex Cloud Functions + Trigger + type: string + serviceAccountId: + description: Message Queue Service Account ID for Yandex + Cloud Functions Trigger + type: string + visibilityTimeout: + description: Visibility timeout for Yandex Cloud Functions + Trigger + type: string + type: object + type: array + name: + description: Yandex Cloud Functions Trigger name used to define + trigger + type: string + objectStorage: + description: Object Storage settings definition for Yandex Cloud + Functions Trigger, if present + items: + properties: + batchCutoff: + description: Batch Duration in seconds for Yandex 
Cloud + Functions Trigger + type: string + batchSize: + description: Batch Size for Yandex Cloud Functions Trigger + type: string + bucketId: + description: Object Storage Bucket ID for Yandex Cloud Functions + Trigger + type: string + create: + description: Boolean flag for setting create event for Yandex + Cloud Functions Trigger + type: boolean + delete: + description: Boolean flag for setting delete event for Yandex + Cloud Functions Trigger + type: boolean + prefix: + description: Prefix for Object Storage for Yandex Cloud + Functions Trigger + type: string + suffix: + description: Suffix for Object Storage for Yandex Cloud + Functions Trigger + type: string + update: + description: Boolean flag for setting update event for Yandex + Cloud Functions Trigger + type: boolean + type: object + type: array + timer: + description: Timer settings definition for Yandex Cloud Functions + Trigger, if present + items: + properties: + cronExpression: + description: Cron expression for timer for Yandex Cloud + Functions Trigger + type: string + payload: + description: Payload to be passed to function + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/iot.yandex-cloud.upjet.crossplane.io_corebrokers.yaml b/package/crds/iot.yandex-cloud.upjet.crossplane.io_corebrokers.yaml new file mode 100644 index 0000000..ce14c5c --- /dev/null +++ b/package/crds/iot.yandex-cloud.upjet.crossplane.io_corebrokers.yaml @@ -0,0 +1,612 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: corebrokers.iot.yandex-cloud.upjet.crossplane.io +spec: + group: iot.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: CoreBroker + listKind: CoreBrokerList + plural: corebrokers + singular: corebroker + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: 
.metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: CoreBroker is the Schema for the CoreBrokers API. Allows management + of a Yandex.Cloud IoT Core Broker. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CoreBrokerSpec defines the desired state of CoreBroker + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + certificates: + description: A set of certificate's fingerprints for the IoT Core + Broker + items: + type: string + type: array + x-kubernetes-list-type: set + description: + description: Description of the IoT Core Broker + type: string + folderId: + description: Folder ID for the IoT Core Broker + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the IoT + Core Broker. + type: object + x-kubernetes-map-type: granular + logOptions: + description: Options for logging for IoT Core Broker + items: + properties: + disabled: + description: Is logging for broker disabled + type: boolean + folderId: + description: Log entries are written to default log group + for specified folder + type: string + logGroupId: + description: Log entries are written to specified log group + type: string + minLevel: + description: Minimum log entry level + type: string + type: object + type: array + name: + description: IoT Core Broker name used to define broker + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + certificates: + description: A set of certificate's fingerprints for the IoT Core + Broker + items: + type: string + type: array + x-kubernetes-list-type: set + description: + description: Description of the IoT Core Broker + type: string + folderId: + description: Folder ID for the IoT Core Broker + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the IoT + Core Broker. + type: object + x-kubernetes-map-type: granular + logOptions: + description: Options for logging for IoT Core Broker + items: + properties: + disabled: + description: Is logging for broker disabled + type: boolean + folderId: + description: Log entries are written to default log group + for specified folder + type: string + logGroupId: + description: Log entries are written to specified log group + type: string + minLevel: + description: Minimum log entry level + type: string + type: object + type: array + name: + description: IoT Core Broker name used to define broker + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. 
If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: CoreBrokerStatus defines the observed state of CoreBroker. 
+ properties: + atProvider: + properties: + certificates: + description: A set of certificate's fingerprints for the IoT Core + Broker + items: + type: string + type: array + x-kubernetes-list-type: set + createdAt: + description: Creation timestamp of the IoT Core Broker + type: string + description: + description: Description of the IoT Core Broker + type: string + folderId: + description: Folder ID for the IoT Core Broker + type: string + id: + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the IoT + Core Broker. + type: object + x-kubernetes-map-type: granular + logOptions: + description: Options for logging for IoT Core Broker + items: + properties: + disabled: + description: Is logging for broker disabled + type: boolean + folderId: + description: Log entries are written to default log group + for specified folder + type: string + logGroupId: + description: Log entries are written to specified log group + type: string + minLevel: + description: Minimum log entry level + type: string + type: object + type: array + name: + description: IoT Core Broker name used to define broker + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/iot.yandex-cloud.upjet.crossplane.io_coredevices.yaml b/package/crds/iot.yandex-cloud.upjet.crossplane.io_coredevices.yaml new file mode 100644 index 0000000..4646788 --- /dev/null +++ b/package/crds/iot.yandex-cloud.upjet.crossplane.io_coredevices.yaml @@ -0,0 +1,432 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: coredevices.iot.yandex-cloud.upjet.crossplane.io +spec: + group: iot.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: CoreDevice + listKind: CoreDeviceList + plural: coredevices + singular: coredevice + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: 
.metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: CoreDevice is the Schema for the CoreDevices API. Allows management + of a Yandex.Cloud IoT Core Device. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CoreDeviceSpec defines the desired state of CoreDevice + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + aliases: + additionalProperties: + type: string + description: A set of key/value aliases pairs to assign to the + IoT Core Device + type: object + x-kubernetes-map-type: granular + certificates: + description: A set of certificate's fingerprints for the IoT Core + Device + items: + type: string + type: array + x-kubernetes-list-type: set + description: + description: Description of the IoT Core Device + type: string + name: + description: IoT Core Device name used to define device + type: string + passwordsSecretRef: + description: A set of passwords's id for the IoT Core Device + items: + description: A SecretKeySelector is a reference to a secret + key in an arbitrary namespace. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: array + registryId: + description: IoT Core Registry ID for the IoT Core Device + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + aliases: + additionalProperties: + type: string + description: A set of key/value aliases pairs to assign to the + IoT Core Device + type: object + x-kubernetes-map-type: granular + certificates: + description: A set of certificate's fingerprints for the IoT Core + Device + items: + type: string + type: array + x-kubernetes-list-type: set + description: + description: Description of the IoT Core Device + type: string + name: + description: IoT Core Device name used to define device + type: string + passwordsSecretRef: + items: + type: string + type: array + registryId: + description: IoT Core Registry ID for the IoT Core Device + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.registryId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.registryId) + || (has(self.initProvider) && has(self.initProvider.registryId))' + status: + description: CoreDeviceStatus defines the observed state of CoreDevice. 
+ properties: + atProvider: + properties: + aliases: + additionalProperties: + type: string + description: A set of key/value aliases pairs to assign to the + IoT Core Device + type: object + x-kubernetes-map-type: granular + certificates: + description: A set of certificate's fingerprints for the IoT Core + Device + items: + type: string + type: array + x-kubernetes-list-type: set + createdAt: + description: Creation timestamp of the IoT Core Device + type: string + description: + description: Description of the IoT Core Device + type: string + id: + type: string + name: + description: IoT Core Device name used to define device + type: string + registryId: + description: IoT Core Registry ID for the IoT Core Device + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/iot.yandex-cloud.upjet.crossplane.io_coreregistries.yaml b/package/crds/iot.yandex-cloud.upjet.crossplane.io_coreregistries.yaml new file mode 100644 index 0000000..78cad3c --- /dev/null +++ b/package/crds/iot.yandex-cloud.upjet.crossplane.io_coreregistries.yaml @@ -0,0 +1,637 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: coreregistries.iot.yandex-cloud.upjet.crossplane.io +spec: + group: iot.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: CoreRegistry + listKind: CoreRegistryList + plural: coreregistries + singular: coreregistry + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: CoreRegistry is the Schema for the CoreRegistrys API. Allows + management of a Yandex.Cloud IoT Core Registry. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CoreRegistrySpec defines the desired state of CoreRegistry + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + certificates: + description: A set of certificate's fingerprints for the IoT Core + Registry + items: + type: string + type: array + x-kubernetes-list-type: set + description: + description: Description of the IoT Core Registry + type: string + folderId: + description: Folder ID for the IoT Core Registry + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the IoT + Core Registry. + type: object + x-kubernetes-map-type: granular + logOptions: + description: Options for logging for IoT Core Registry + items: + properties: + disabled: + description: Is logging for registry disabled + type: boolean + folderId: + description: Log entries are written to default log group + for specified folder + type: string + logGroupId: + description: Log entries are written to specified log group + type: string + minLevel: + description: Minimum log entry level + type: string + type: object + type: array + name: + description: IoT Core Device name used to define registry + type: string + passwordsSecretRef: + description: A set of passwords's id for the IoT Core Registry + items: + description: A SecretKeySelector is a reference to a secret + key in an arbitrary namespace. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + certificates: + description: A set of certificate's fingerprints for the IoT Core + Registry + items: + type: string + type: array + x-kubernetes-list-type: set + description: + description: Description of the IoT Core Registry + type: string + folderId: + description: Folder ID for the IoT Core Registry + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the IoT + Core Registry. + type: object + x-kubernetes-map-type: granular + logOptions: + description: Options for logging for IoT Core Registry + items: + properties: + disabled: + description: Is logging for registry disabled + type: boolean + folderId: + description: Log entries are written to default log group + for specified folder + type: string + logGroupId: + description: Log entries are written to specified log group + type: string + minLevel: + description: Minimum log entry level + type: string + type: object + type: array + name: + description: IoT Core Device name used to define registry + type: string + passwordsSecretRef: + items: + type: string + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: CoreRegistryStatus defines the observed state of CoreRegistry. + properties: + atProvider: + properties: + certificates: + description: A set of certificate's fingerprints for the IoT Core + Registry + items: + type: string + type: array + x-kubernetes-list-type: set + createdAt: + description: Creation timestamp of the IoT Core Registry + type: string + description: + description: Description of the IoT Core Registry + type: string + folderId: + description: Folder ID for the IoT Core Registry + type: string + id: + type: string + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the IoT + Core Registry. 
+ type: object + x-kubernetes-map-type: granular + logOptions: + description: Options for logging for IoT Core Registry + items: + properties: + disabled: + description: Is logging for registry disabled + type: boolean + folderId: + description: Log entries are written to default log group + for specified folder + type: string + logGroupId: + description: Log entries are written to specified log group + type: string + minLevel: + description: Minimum log entry level + type: string + type: object + type: array + name: + description: IoT Core Device name used to define registry + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/mdb.yandex-cloud.upjet.crossplane.io_mongodbdatabases.yaml b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_mongodbdatabases.yaml index dcf4d80..5167fa9 100644 --- a/package/crds/mdb.yandex-cloud.upjet.crossplane.io_mongodbdatabases.yaml +++ b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_mongodbdatabases.yaml @@ -149,6 +149,9 @@ spec: type: string type: object type: object + name: + description: The name of the database. + type: string type: object initProvider: description: |- @@ -239,6 +242,9 @@ spec: type: string type: object type: object + name: + description: The name of the database. + type: string type: object managementPolicies: default: @@ -407,6 +413,11 @@ spec: required: - forProvider type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' status: description: MongodbDatabaseStatus defines the observed state of MongodbDatabase. properties: @@ -416,6 +427,9 @@ spec: type: string id: type: string + name: + description: The name of the database. + type: string type: object conditions: description: Conditions of the resource. 
diff --git a/package/crds/mdb.yandex-cloud.upjet.crossplane.io_mongodbusers.yaml b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_mongodbusers.yaml index 14c9cf3..3372a45 100644 --- a/package/crds/mdb.yandex-cloud.upjet.crossplane.io_mongodbusers.yaml +++ b/package/crds/mdb.yandex-cloud.upjet.crossplane.io_mongodbusers.yaml @@ -149,6 +149,9 @@ spec: type: string type: object type: object + name: + description: The name of the user. + type: string passwordSecretRef: description: The password of the user. properties: @@ -274,6 +277,9 @@ spec: type: string type: object type: object + name: + description: The name of the user. + type: string passwordSecretRef: description: The password of the user. properties: @@ -480,6 +486,10 @@ spec: - forProvider type: object x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' - message: spec.forProvider.passwordSecretRef is a required parameter rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies || ''Update'' in self.managementPolicies) || has(self.forProvider.passwordSecretRef)' @@ -492,6 +502,9 @@ spec: type: string id: type: string + name: + description: The name of the user. + type: string permission: description: Set of permissions granted to the user. The structure is documented below. 
diff --git a/package/crds/serverless.yandex-cloud.upjet.crossplane.io_containeriambindings.yaml b/package/crds/serverless.yandex-cloud.upjet.crossplane.io_containeriambindings.yaml new file mode 100644 index 0000000..a177100 --- /dev/null +++ b/package/crds/serverless.yandex-cloud.upjet.crossplane.io_containeriambindings.yaml @@ -0,0 +1,390 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: containeriambindings.serverless.yandex-cloud.upjet.crossplane.io +spec: + group: serverless.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: ContainerIAMBinding + listKind: ContainerIAMBindingList + plural: containeriambindings + singular: containeriambinding + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ContainerIAMBinding is the Schema for the ContainerIAMBindings + API. Allows management of a single IAM binding for a + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ContainerIAMBindingSpec defines the desired state of ContainerIAMBinding + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + containerId: + description: The Yandex Serverless Container ID to apply a binding + to. + type: string + members: + description: |- + Identities that will be granted the privilege in role. + Each entry can have one of the following values: + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be applied. + type: string + sleepAfter: + type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + containerId: + description: The Yandex Serverless Container ID to apply a binding + to. + type: string + members: + description: |- + Identities that will be granted the privilege in role. + Each entry can have one of the following values: + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be applied. + type: string + sleepAfter: + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.containerId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.containerId) + || (has(self.initProvider) && has(self.initProvider.containerId))' + - message: spec.forProvider.members is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.members) + || (has(self.initProvider) && has(self.initProvider.members))' + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + status: + description: ContainerIAMBindingStatus defines the observed state of ContainerIAMBinding. + properties: + atProvider: + properties: + containerId: + description: The Yandex Serverless Container ID to apply a binding + to. + type: string + id: + type: string + members: + description: |- + Identities that will be granted the privilege in role. + Each entry can have one of the following values: + items: + type: string + type: array + x-kubernetes-list-type: set + role: + description: The role that should be applied. + type: string + sleepAfter: + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/serverless.yandex-cloud.upjet.crossplane.io_containers.yaml b/package/crds/serverless.yandex-cloud.upjet.crossplane.io_containers.yaml new file mode 100644 index 0000000..6e024cd --- /dev/null +++ b/package/crds/serverless.yandex-cloud.upjet.crossplane.io_containers.yaml @@ -0,0 +1,946 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: containers.serverless.yandex-cloud.upjet.crossplane.io +spec: + group: serverless.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Container + listKind: ContainerList + plural: containers + singular: container + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Container is the Schema for the Containers API. Allows management + of a Yandex Cloud Serverless Container. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ContainerSpec defines the desired state of Container + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + concurrency: + description: Concurrency of Yandex Cloud Serverless Container + type: number + connectivity: + description: Network access. If specified the revision will be + attached to specified network + items: + properties: + networkId: + description: Network the revision will have access to + type: string + type: object + type: array + coreFraction: + description: Core fraction (0...100) of the Yandex Cloud Serverless + Container + type: number + cores: + type: number + description: + description: Description of the Yandex Cloud Serverless Container + type: string + executionTimeout: + description: Execution timeout in seconds (duration format) for + Yandex Cloud Serverless Container + type: string + folderId: + description: Folder ID for the Yandex Cloud Serverless Container + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + image: + description: Revision deployment image for Yandex Cloud Serverless + Container + items: + properties: + args: + description: List of arguments for Yandex Cloud Serverless + Container + items: + type: string + type: array + command: + description: List of commands for Yandex Cloud Serverless + Container + items: + type: string + type: array + digest: + description: |- + Digest of image that will be deployed as Yandex Cloud Serverless Container. + If presented, should be equal to digest that will be resolved at server side by URL. + Container will be updated on digest change even if image.0.url stays the same. + If field not specified then its value will be computed. + type: string + environment: + additionalProperties: + type: string + description: A set of key/value environment variable pairs + for Yandex Cloud Serverless Container + type: object + x-kubernetes-map-type: granular + url: + description: Invoke URL for the Yandex Cloud Serverless + Container + type: string + workDir: + description: Working directory for Yandex Cloud Serverless + Container + type: string + type: object + type: array + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Yandex + Cloud Serverless Container + type: object + x-kubernetes-map-type: granular + logOptions: + description: Options for logging from Yandex Cloud Serverless + Container + items: + properties: + disabled: + description: Is logging from container disabled + type: boolean + folderId: + description: Log entries are written to default log group + for specified folder + type: string + logGroupId: + description: Log entries are written to specified log group + type: string + minLevel: + description: Minimum log entry level + type: string + type: object + type: array + memory: + description: |- + Memory in megabytes 
(aligned to 128MB) for Yandex Cloud Serverless Container + Container memory in megabytes, should be aligned to 128 + type: number + name: + description: Yandex Cloud Serverless Container name + type: string + secrets: + description: Secrets for Yandex Cloud Serverless Container + items: + properties: + environmentVariable: + description: Container's environment variable in which secret's + value will be stored. + type: string + id: + description: Secret's id. + type: string + key: + description: Secret's entries key which value will be stored + in environment variable. + type: string + versionId: + description: Secret's version id. + type: string + type: object + type: array + serviceAccountId: + description: Service account ID for Yandex Cloud Serverless Container + type: string + storageMounts: + description: Storage mounts for Yandex Cloud Serverless Container + items: + properties: + bucket: + description: Name of the mounting bucket. + type: string + mountPointPath: + description: Path inside the container to access the directory + in which the bucket is mounted. + type: string + prefix: + description: Prefix within the bucket. If you leave this + field empty, the entire bucket will be mounted. + type: string + readOnly: + description: Mount the bucket in read-only mode. + type: boolean + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + concurrency: + description: Concurrency of Yandex Cloud Serverless Container + type: number + connectivity: + description: Network access. If specified the revision will be + attached to specified network + items: + properties: + networkId: + description: Network the revision will have access to + type: string + type: object + type: array + coreFraction: + description: Core fraction (0...100) of the Yandex Cloud Serverless + Container + type: number + cores: + type: number + description: + description: Description of the Yandex Cloud Serverless Container + type: string + executionTimeout: + description: Execution timeout in seconds (duration format) for + Yandex Cloud Serverless Container + type: string + folderId: + description: Folder ID for the Yandex Cloud Serverless Container + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + image: + description: Revision deployment image for Yandex Cloud Serverless + Container + items: + properties: + args: + description: List of arguments for Yandex Cloud Serverless + Container + items: + type: string + type: array + command: + description: List of commands for Yandex Cloud Serverless + Container + items: + type: string + type: array + digest: + description: |- + Digest of image that will be deployed as Yandex Cloud Serverless Container. + If presented, should be equal to digest that will be resolved at server side by URL. + Container will be updated on digest change even if image.0.url stays the same. + If field not specified then its value will be computed. 
+ type: string + environment: + additionalProperties: + type: string + description: A set of key/value environment variable pairs + for Yandex Cloud Serverless Container + type: object + x-kubernetes-map-type: granular + url: + description: Invoke URL for the Yandex Cloud Serverless + Container + type: string + workDir: + description: Working directory for Yandex Cloud Serverless + Container + type: string + type: object + type: array + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Yandex + Cloud Serverless Container + type: object + x-kubernetes-map-type: granular + logOptions: + description: Options for logging from Yandex Cloud Serverless + Container + items: + properties: + disabled: + description: Is logging from container disabled + type: boolean + folderId: + description: Log entries are written to default log group + for specified folder + type: string + logGroupId: + description: Log entries are written to specified log group + type: string + minLevel: + description: Minimum log entry level + type: string + type: object + type: array + memory: + description: |- + Memory in megabytes (aligned to 128MB) for Yandex Cloud Serverless Container + Container memory in megabytes, should be aligned to 128 + type: number + name: + description: Yandex Cloud Serverless Container name + type: string + secrets: + description: Secrets for Yandex Cloud Serverless Container + items: + properties: + environmentVariable: + description: Container's environment variable in which secret's + value will be stored. + type: string + id: + description: Secret's id. + type: string + key: + description: Secret's entries key which value will be stored + in environment variable. + type: string + versionId: + description: Secret's version id. 
+ type: string + type: object + type: array + serviceAccountId: + description: Service account ID for Yandex Cloud Serverless Container + type: string + storageMounts: + description: Storage mounts for Yandex Cloud Serverless Container + items: + properties: + bucket: + description: Name of the mounting bucket. + type: string + mountPointPath: + description: Path inside the container to access the directory + in which the bucket is mounted. + type: string + prefix: + description: Prefix within the bucket. If you leave this + field empty, the entire bucket will be mounted. + type: string + readOnly: + description: Mount the bucket in read-only mode. + type: boolean + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.image is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.image) + || (has(self.initProvider) && has(self.initProvider.image))' + - message: spec.forProvider.memory is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.memory) + || (has(self.initProvider) && has(self.initProvider.memory))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: ContainerStatus defines the observed state of Container. + properties: + atProvider: + properties: + concurrency: + description: Concurrency of Yandex Cloud Serverless Container + type: number + connectivity: + description: Network access. 
If specified the revision will be + attached to specified network + items: + properties: + networkId: + description: Network the revision will have access to + type: string + type: object + type: array + coreFraction: + description: Core fraction (0...100) of the Yandex Cloud Serverless + Container + type: number + cores: + type: number + createdAt: + description: Creation timestamp of the Yandex Cloud Serverless + Container + type: string + description: + description: Description of the Yandex Cloud Serverless Container + type: string + executionTimeout: + description: Execution timeout in seconds (duration format) for + Yandex Cloud Serverless Container + type: string + folderId: + description: Folder ID for the Yandex Cloud Serverless Container + type: string + id: + description: Secret's id. + type: string + image: + description: Revision deployment image for Yandex Cloud Serverless + Container + items: + properties: + args: + description: List of arguments for Yandex Cloud Serverless + Container + items: + type: string + type: array + command: + description: List of commands for Yandex Cloud Serverless + Container + items: + type: string + type: array + digest: + description: |- + Digest of image that will be deployed as Yandex Cloud Serverless Container. + If presented, should be equal to digest that will be resolved at server side by URL. + Container will be updated on digest change even if image.0.url stays the same. + If field not specified then its value will be computed. 
+ type: string + environment: + additionalProperties: + type: string + description: A set of key/value environment variable pairs + for Yandex Cloud Serverless Container + type: object + x-kubernetes-map-type: granular + url: + description: Invoke URL for the Yandex Cloud Serverless + Container + type: string + workDir: + description: Working directory for Yandex Cloud Serverless + Container + type: string + type: object + type: array + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Yandex + Cloud Serverless Container + type: object + x-kubernetes-map-type: granular + logOptions: + description: Options for logging from Yandex Cloud Serverless + Container + items: + properties: + disabled: + description: Is logging from container disabled + type: boolean + folderId: + description: Log entries are written to default log group + for specified folder + type: string + logGroupId: + description: Log entries are written to specified log group + type: string + minLevel: + description: Minimum log entry level + type: string + type: object + type: array + memory: + description: |- + Memory in megabytes (aligned to 128MB) for Yandex Cloud Serverless Container + Container memory in megabytes, should be aligned to 128 + type: number + name: + description: Yandex Cloud Serverless Container name + type: string + revisionId: + description: Last revision ID of the Yandex Cloud Serverless Container + type: string + secrets: + description: Secrets for Yandex Cloud Serverless Container + items: + properties: + environmentVariable: + description: Container's environment variable in which secret's + value will be stored. + type: string + id: + description: Secret's id. + type: string + key: + description: Secret's entries key which value will be stored + in environment variable. + type: string + versionId: + description: Secret's version id. 
+ type: string + type: object + type: array + serviceAccountId: + description: Service account ID for Yandex Cloud Serverless Container + type: string + storageMounts: + description: Storage mounts for Yandex Cloud Serverless Container + items: + properties: + bucket: + description: Name of the mounting bucket. + type: string + mountPointPath: + description: Path inside the container to access the directory + in which the bucket is mounted. + type: string + prefix: + description: Prefix within the bucket. If you leave this + field empty, the entire bucket will be mounted. + type: string + readOnly: + description: Mount the bucket in read-only mode. + type: boolean + type: object + type: array + url: + description: Invoke URL for the Yandex Cloud Serverless Container + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/yandex.yandex-cloud.upjet.crossplane.io_functions.yaml b/package/crds/yandex.yandex-cloud.upjet.crossplane.io_functions.yaml new file mode 100644 index 0000000..d0d27c1 --- /dev/null +++ b/package/crds/yandex.yandex-cloud.upjet.crossplane.io_functions.yaml @@ -0,0 +1,1103 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: functions.yandex.yandex-cloud.upjet.crossplane.io +spec: + group: yandex.yandex-cloud.upjet.crossplane.io + names: + categories: + - crossplane + - managed + - yandex-cloud + kind: Function + listKind: FunctionList + plural: functions + singular: function + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Function is the Schema for the Functions API. Allows management + of a Yandex Cloud Function. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FunctionSpec defines the desired state of Function + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + asyncInvocation: + description: Config for asynchronous invocations of Yandex Cloud + Function. 
+ items: + properties: + retriesCount: + description: Maximum number of retries for async invocation + type: number + serviceAccountId: + description: Service account used for async invocation + type: string + ymqFailureTarget: + description: Target for unsuccessful async invocation + items: + properties: + arn: + description: YMQ ARN + type: string + serviceAccountId: + description: Service account ID for Yandex Cloud Function + type: string + type: object + type: array + ymqSuccessTarget: + description: Target for successful async invocation + items: + properties: + arn: + description: YMQ ARN + type: string + serviceAccountId: + description: Service account used for writing result + to queue + type: string + type: object + type: array + type: object + type: array + concurrency: + description: The maximum number of requests processed by a function + instance at the same time. + type: number + connectivity: + description: Function version connectivity. If specified the version + will be attached to specified network. + items: + properties: + networkId: + description: Network the version will have access to. It's + essential to specify network with subnets in all availability + zones. + type: string + type: object + type: array + content: + description: Version deployment content for Yandex Cloud Function + code. Can be only one package or content section. Either package + or content section must be specified. + items: + properties: + zipFilename: + description: Filename to zip archive for the version. 
+ type: string + type: object + type: array + description: + description: Description of the Yandex Cloud Function + type: string + entrypoint: + description: Entrypoint for Yandex Cloud Function + type: string + environment: + additionalProperties: + type: string + description: A set of key/value environment variables for Yandex + Cloud Function + type: object + x-kubernetes-map-type: granular + executionTimeout: + description: Execution timeout in seconds for Yandex Cloud Function + type: string + folderId: + description: Folder ID for the Yandex Cloud Function + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Yandex + Cloud Function + type: object + x-kubernetes-map-type: granular + logOptions: + description: Options for logging from Yandex Cloud Function. + items: + properties: + disabled: + description: Is logging from function disabled + type: boolean + folderId: + description: Folder ID for the Yandex Cloud Function + type: string + logGroupId: + description: Log entries are written to specified log group + type: string + minLevel: + description: Minimum log entry level + type: string + type: object + type: array + memory: + description: Memory in megabytes (aligned to 128MB) for Yandex + Cloud Function + type: number + name: + description: Yandex Cloud Function name used to define trigger + type: string + package: + description: Version deployment package for Yandex Cloud Function + code. Can be only one package or content section. Either package + or content section must be specified. + items: + properties: + bucketName: + description: Name of the bucket that stores the code for + the version. 
+ type: string + objectName: + description: Name of the object in the bucket that stores + the code for the version. + type: string + sha256: + description: SHA256 hash of the version deployment package. + type: string + type: object + type: array + runtime: + description: Runtime for Yandex Cloud Function + type: string + secrets: + description: Secrets for Yandex Cloud Function. + items: + properties: + environmentVariable: + description: Function's environment variable in which secret's + value will be stored. + type: string + id: + description: Secret's id. + type: string + key: + description: Secret's entries key which value will be stored + in environment variable. + type: string + versionId: + description: Secret's version id. + type: string + type: object + type: array + serviceAccountId: + description: Service account ID for Yandex Cloud Function + type: string + storageMounts: + description: Storage mounts for Yandex Cloud Function. + items: + properties: + bucket: + description: Name of the mounting bucket. + type: string + mountPointName: + description: Name of the mount point. The directory where + the bucket is mounted will be accessible at the /function/storage/ + path. + type: string + prefix: + description: Prefix within the bucket. If you leave this + field empty, the entire bucket will be mounted. + type: string + readOnly: + description: Mount the bucket in read-only mode. + type: boolean + type: object + type: array + tags: + description: Tags for Yandex Cloud Function. Tag "$latest" isn't + returned. + items: + type: string + type: array + x-kubernetes-list-type: set + tmpfsSize: + description: Tmpfs size for Yandex Cloud Function. + type: number + userHash: + description: User-defined string for current function version. + User must change this string any times when function changed. + Function will be updated when hash is changed. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. 
It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + asyncInvocation: + description: Config for asynchronous invocations of Yandex Cloud + Function. + items: + properties: + retriesCount: + description: Maximum number of retries for async invocation + type: number + serviceAccountId: + description: Service account used for async invocation + type: string + ymqFailureTarget: + description: Target for unsuccessful async invocation + items: + properties: + arn: + description: YMQ ARN + type: string + serviceAccountId: + description: Service account ID for Yandex Cloud Function + type: string + type: object + type: array + ymqSuccessTarget: + description: Target for successful async invocation + items: + properties: + arn: + description: YMQ ARN + type: string + serviceAccountId: + description: Service account used for writing result + to queue + type: string + type: object + type: array + type: object + type: array + concurrency: + description: The maximum number of requests processed by a function + instance at the same time. + type: number + connectivity: + description: Function version connectivity. If specified the version + will be attached to specified network. + items: + properties: + networkId: + description: Network the version will have access to. It's + essential to specify network with subnets in all availability + zones. 
+ type: string + type: object + type: array + content: + description: Version deployment content for Yandex Cloud Function + code. Can be only one package or content section. Either package + or content section must be specified. + items: + properties: + zipFilename: + description: Filename to zip archive for the version. + type: string + type: object + type: array + description: + description: Description of the Yandex Cloud Function + type: string + entrypoint: + description: Entrypoint for Yandex Cloud Function + type: string + environment: + additionalProperties: + type: string + description: A set of key/value environment variables for Yandex + Cloud Function + type: object + x-kubernetes-map-type: granular + executionTimeout: + description: Execution timeout in seconds for Yandex Cloud Function + type: string + folderId: + description: Folder ID for the Yandex Cloud Function + type: string + folderIdRef: + description: Reference to a Folder in resourcemanager to populate + folderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + folderIdSelector: + description: Selector for a Folder in resourcemanager to populate + folderId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Yandex + Cloud Function + type: object + x-kubernetes-map-type: granular + logOptions: + description: Options for logging from Yandex Cloud Function. 
+ items: + properties: + disabled: + description: Is logging from function disabled + type: boolean + folderId: + description: Folder ID for the Yandex Cloud Function + type: string + logGroupId: + description: Log entries are written to specified log group + type: string + minLevel: + description: Minimum log entry level + type: string + type: object + type: array + memory: + description: Memory in megabytes (aligned to 128MB) for Yandex + Cloud Function + type: number + name: + description: Yandex Cloud Function name used to define trigger + type: string + package: + description: Version deployment package for Yandex Cloud Function + code. Can be only one package or content section. Either package + or content section must be specified. + items: + properties: + bucketName: + description: Name of the bucket that stores the code for + the version. + type: string + objectName: + description: Name of the object in the bucket that stores + the code for the version. + type: string + sha256: + description: SHA256 hash of the version deployment package. + type: string + type: object + type: array + runtime: + description: Runtime for Yandex Cloud Function + type: string + secrets: + description: Secrets for Yandex Cloud Function. + items: + properties: + environmentVariable: + description: Function's environment variable in which secret's + value will be stored. + type: string + id: + description: Secret's id. + type: string + key: + description: Secret's entries key which value will be stored + in environment variable. + type: string + versionId: + description: Secret's version id. + type: string + type: object + type: array + serviceAccountId: + description: Service account ID for Yandex Cloud Function + type: string + storageMounts: + description: Storage mounts for Yandex Cloud Function. + items: + properties: + bucket: + description: Name of the mounting bucket. + type: string + mountPointName: + description: Name of the mount point. 
The directory where + the bucket is mounted will be accessible at the /function/storage/ + path. + type: string + prefix: + description: Prefix within the bucket. If you leave this + field empty, the entire bucket will be mounted. + type: string + readOnly: + description: Mount the bucket in read-only mode. + type: boolean + type: object + type: array + tags: + description: Tags for Yandex Cloud Function. Tag "$latest" isn't + returned. + items: + type: string + type: array + x-kubernetes-list-type: set + tmpfsSize: + description: Tmpfs size for Yandex Cloud Function. + type: number + userHash: + description: User-defined string for current function version. + User must change this string any times when function changed. + Function will be updated when hash is changed. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.entrypoint is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.entrypoint) + || (has(self.initProvider) && has(self.initProvider.entrypoint))' + - message: spec.forProvider.memory is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.memory) + || (has(self.initProvider) && has(self.initProvider.memory))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.runtime is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.runtime) + || (has(self.initProvider) && has(self.initProvider.runtime))' + - message: spec.forProvider.userHash is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.userHash) + || 
(has(self.initProvider) && has(self.initProvider.userHash))' + status: + description: FunctionStatus defines the observed state of Function. + properties: + atProvider: + properties: + asyncInvocation: + description: Config for asynchronous invocations of Yandex Cloud + Function. + items: + properties: + retriesCount: + description: Maximum number of retries for async invocation + type: number + serviceAccountId: + description: Service account used for async invocation + type: string + ymqFailureTarget: + description: Target for unsuccessful async invocation + items: + properties: + arn: + description: YMQ ARN + type: string + serviceAccountId: + description: Service account ID for Yandex Cloud Function + type: string + type: object + type: array + ymqSuccessTarget: + description: Target for successful async invocation + items: + properties: + arn: + description: YMQ ARN + type: string + serviceAccountId: + description: Service account used for writing result + to queue + type: string + type: object + type: array + type: object + type: array + concurrency: + description: The maximum number of requests processed by a function + instance at the same time. + type: number + connectivity: + description: Function version connectivity. If specified the version + will be attached to specified network. + items: + properties: + networkId: + description: Network the version will have access to. It's + essential to specify network with subnets in all availability + zones. + type: string + type: object + type: array + content: + description: Version deployment content for Yandex Cloud Function + code. Can be only one package or content section. Either package + or content section must be specified. + items: + properties: + zipFilename: + description: Filename to zip archive for the version. + type: string + type: object + type: array + createdAt: + description: Creation timestamp of the Yandex Cloud Function. 
+ type: string + description: + description: Description of the Yandex Cloud Function + type: string + entrypoint: + description: Entrypoint for Yandex Cloud Function + type: string + environment: + additionalProperties: + type: string + description: A set of key/value environment variables for Yandex + Cloud Function + type: object + x-kubernetes-map-type: granular + executionTimeout: + description: Execution timeout in seconds for Yandex Cloud Function + type: string + folderId: + description: Folder ID for the Yandex Cloud Function + type: string + id: + description: Function's id. + type: string + imageSize: + description: Image size for Yandex Cloud Function. + type: number + labels: + additionalProperties: + type: string + description: A set of key/value label pairs to assign to the Yandex + Cloud Function + type: object + x-kubernetes-map-type: granular + logOptions: + description: Options for logging from Yandex Cloud Function. + items: + properties: + disabled: + description: Is logging from function disabled + type: boolean + folderId: + description: Folder ID for the Yandex Cloud Function + type: string + logGroupId: + description: Log entries are written to specified log group + type: string + minLevel: + description: Minimum log entry level + type: string + type: object + type: array + loggroupId: + description: Loggroup ID for Yandex Cloud Function. + type: string + memory: + description: Memory in megabytes (aligned to 128MB) for Yandex + Cloud Function + type: number + name: + description: Yandex Cloud Function name used to define trigger + type: string + package: + description: Version deployment package for Yandex Cloud Function + code. Can be only one package or content section. Either package + or content section must be specified. + items: + properties: + bucketName: + description: Name of the bucket that stores the code for + the version. 
+ type: string + objectName: + description: Name of the object in the bucket that stores + the code for the version. + type: string + sha256: + description: SHA256 hash of the version deployment package. + type: string + type: object + type: array + runtime: + description: Runtime for Yandex Cloud Function + type: string + secrets: + description: Secrets for Yandex Cloud Function. + items: + properties: + environmentVariable: + description: Function's environment variable in which secret's + value will be stored. + type: string + id: + description: Secret's id. + type: string + key: + description: Secret's entries key which value will be stored + in environment variable. + type: string + versionId: + description: Secret's version id. + type: string + type: object + type: array + serviceAccountId: + description: Service account ID for Yandex Cloud Function + type: string + storageMounts: + description: Storage mounts for Yandex Cloud Function. + items: + properties: + bucket: + description: Name of the mounting bucket. + type: string + mountPointName: + description: Name of the mount point. The directory where + the bucket is mounted will be accessible at the /function/storage/ + path. + type: string + prefix: + description: Prefix within the bucket. If you leave this + field empty, the entire bucket will be mounted. + type: string + readOnly: + description: Mount the bucket in read-only mode. + type: boolean + type: object + type: array + tags: + description: Tags for Yandex Cloud Function. Tag "$latest" isn't + returned. + items: + type: string + type: array + x-kubernetes-list-type: set + tmpfsSize: + description: Tmpfs size for Yandex Cloud Function. + type: number + userHash: + description: User-defined string for current function version. + User must change this string any time the function is changed. + Function will be updated when hash is changed. + type: string + version: + description: Version for Yandex Cloud Function. 
+ type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {}